__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
+ struct list_head dpp_acl_map;
} cm;
/* Counter indexes ordered by attribute ID */
kfree(work);
}
+void ib_cm_acl_init(struct ib_cm_acl *acl)
+{
+	acl->enabled = false;
+ acl->allowed_list = RB_ROOT;
+ acl->list_count = 0;
+ spin_lock_init(&acl->lock);
+}
+EXPORT_SYMBOL(ib_cm_acl_init);
+
+int ib_cm_acl_insert(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid, u32 ip,
+ const char *uuid)
+{
+ struct ib_cm_acl_elem *elem;
+ struct rb_node **new, *parent = NULL;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acl->lock, flags);
+
+ new = &(acl->allowed_list.rb_node);
+
+	while (*new) {
+		elem = rb_entry(*new, struct ib_cm_acl_elem, node);
+
+		parent = *new;
+		if ((guid == elem->guid) &&
+		    (subnet_prefix == elem->subnet_prefix)) {
+			/* Entry already exists, take another reference */
+			elem->ref_count++;
+			goto out;
+		}
+
+		/* Sort by subnet prefix first, then by GUID */
+		if (subnet_prefix == elem->subnet_prefix) {
+			if (guid > elem->guid)
+				new = &((*new)->rb_right);
+			else
+				new = &((*new)->rb_left);
+		} else if (subnet_prefix > elem->subnet_prefix) {
+			new = &((*new)->rb_right);
+		} else {
+			new = &((*new)->rb_left);
+		}
+	}
+
+	elem = kmalloc(sizeof(*elem), GFP_ATOMIC);
+	if (!elem) {
+		rc = -ENOMEM;
+		goto out;
+	}
+ elem->guid = guid;
+ elem->subnet_prefix = subnet_prefix;
+ elem->ip = ip;
+ memcpy(elem->uuid, uuid, UUID_SZ);
+ elem->ref_count = 1;
+ rb_link_node(&elem->node, parent, new);
+ rb_insert_color(&elem->node, &acl->allowed_list);
+ acl->list_count++;
+
+out:
+ spin_unlock_irqrestore(&acl->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(ib_cm_acl_insert);
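+
+/*
+ * Minimal usage sketch (hypothetical caller; my_acl, peer_prefix,
+ * peer_guid, peer_ip and peer_uuid are illustrative names): the owner
+ * of an ACL initializes it once and then adds allowed peers.
+ * Re-inserting an existing (subnet_prefix, guid) pair only takes an
+ * extra reference on the element.
+ *
+ *	struct ib_cm_acl my_acl;
+ *
+ *	ib_cm_acl_init(&my_acl);
+ *	if (ib_cm_acl_insert(&my_acl, peer_prefix, peer_guid, peer_ip,
+ *			     peer_uuid))
+ *		pr_err("ACL insert failed\n");
+ */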
+
+/* Caller must hold acl->lock */
+static struct ib_cm_acl_elem *_ib_cm_acl_lookup(struct ib_cm_acl *acl,
+						u64 subnet_prefix, u64 guid)
+{
+ struct rb_node *node;
+ struct ib_cm_acl_elem *elem;
+
+ node = acl->allowed_list.rb_node;
+
+ while (node) {
+ elem = rb_entry(node, struct ib_cm_acl_elem, node);
+ if ((guid == elem->guid) &&
+ (subnet_prefix == elem->subnet_prefix))
+ return elem;
+
+		if (subnet_prefix == elem->subnet_prefix) {
+			if (guid > elem->guid)
+				node = node->rb_right;
+			else
+				node = node->rb_left;
+		} else if (subnet_prefix > elem->subnet_prefix) {
+			node = node->rb_right;
+		} else {
+			node = node->rb_left;
+		}
+ }
+
+ return NULL;
+}
+
+struct ib_cm_acl_elem *ib_cm_acl_lookup(struct ib_cm_acl *acl,
+ u64 subnet_prefix, u64 guid)
+{
+ struct ib_cm_acl_elem *elem;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acl->lock, flags);
+ elem = _ib_cm_acl_lookup(acl, subnet_prefix, guid);
+ spin_unlock_irqrestore(&acl->lock, flags);
+
+ return elem;
+}
+EXPORT_SYMBOL(ib_cm_acl_lookup);
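+
+/*
+ * Admission-check sketch (hypothetical caller): before accepting a
+ * connection, a consumer can verify that the remote port is on the
+ * allowed list and reject it otherwise:
+ *
+ *	if (acl->enabled &&
+ *	    !ib_cm_acl_lookup(acl, be64_to_cpu(subnet_prefix),
+ *			      be64_to_cpu(guid)))
+ *		return -EPERM;
+ */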
+
+struct ib_cm_acl_elem *ib_cm_acl_lookup_uuid_ip(struct ib_cm_acl *acl,
+						const char *uuid, u32 ip)
+{
+ struct ib_cm_acl_elem *elem, *ret = NULL;
+ struct rb_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acl->lock, flags);
+ node = rb_first(&acl->allowed_list);
+ while (node) {
+ elem = container_of(node, struct ib_cm_acl_elem, node);
+ if ((ip == elem->ip) && (!memcmp(uuid, elem->uuid, UUID_SZ))) {
+ ret = elem;
+ goto out;
+ }
+ node = rb_next(node);
+ }
+
+out:
+ spin_unlock_irqrestore(&acl->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_cm_acl_lookup_uuid_ip);
+
+int ib_cm_acl_delete(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid)
+{
+ struct ib_cm_acl_elem *elem;
+ int ref_count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acl->lock, flags);
+ elem = _ib_cm_acl_lookup(acl, subnet_prefix, guid);
+ if (elem) {
+ elem->ref_count--;
+ ref_count = elem->ref_count;
+ if (elem->ref_count == 0) {
+ rb_erase(&elem->node, &acl->allowed_list);
+ kfree(elem);
+ acl->list_count--;
+ }
+ }
+ spin_unlock_irqrestore(&acl->lock, flags);
+ return ref_count;
+}
+EXPORT_SYMBOL(ib_cm_acl_delete);
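+
+/*
+ * Reference-count note: ib_cm_acl_delete() drops one reference and
+ * returns the count that remains, so a return of 0 means either that
+ * the element was freed or that it was never on the list.  Sketch:
+ *
+ *	if (ib_cm_acl_delete(acl, prefix, guid) == 0)
+ *		pr_debug("peer no longer in ACL\n");
+ */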
+
+void ib_cm_acl_scan(struct ib_cm_acl *acl, struct ib_cm_acl_elem **list,
+ ssize_t *list_count)
+{
+ struct ib_cm_acl_elem *elem, *list_elem;
+ struct rb_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acl->lock, flags);
+	*list = kmalloc_array(acl->list_count, sizeof(**list), GFP_ATOMIC);
+	if (!*list) {
+		*list_count = 0;
+		spin_unlock_irqrestore(&acl->lock, flags);
+		pr_err("Failed to allocate memory for ACL scan\n");
+		return;
+	}
+ list_elem = *list;
+ node = rb_first(&acl->allowed_list);
+ while (node) {
+ elem = container_of(node, struct ib_cm_acl_elem, node);
+ list_elem->guid = elem->guid;
+ list_elem->subnet_prefix = elem->subnet_prefix;
+ list_elem->ip = elem->ip;
+ memcpy(list_elem->uuid, elem->uuid, UUID_SZ);
+ list_elem->ref_count = elem->ref_count;
+ list_elem++;
+ node = rb_next(node);
+ }
+
+ *list_count = acl->list_count;
+ spin_unlock_irqrestore(&acl->lock, flags);
+}
+EXPORT_SYMBOL(ib_cm_acl_scan);
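+
+/*
+ * Scan sketch (hypothetical debug consumer): ib_cm_acl_scan() hands
+ * back a kmalloc'ed snapshot that the caller must kfree():
+ *
+ *	struct ib_cm_acl_elem *list;
+ *	ssize_t i, count;
+ *
+ *	ib_cm_acl_scan(acl, &list, &count);
+ *	for (i = 0; i < count; i++)
+ *		pr_debug("guid 0x%llx ref %d\n", list[i].guid,
+ *			 list[i].ref_count);
+ *	kfree(list);
+ */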
+
+void ib_cm_acl_clean(struct ib_cm_acl *acl)
+{
+ struct ib_cm_acl_elem *elem;
+ struct rb_node *node, *curr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acl->lock, flags);
+ node = rb_first(&acl->allowed_list);
+ while (node) {
+ curr = node;
+ node = rb_next(node);
+ elem = container_of(curr, struct ib_cm_acl_elem, node);
+ rb_erase(curr, &acl->allowed_list);
+ kfree(elem);
+ }
+
+ acl->list_count = 0;
+ spin_unlock_irqrestore(&acl->lock, flags);
+}
+EXPORT_SYMBOL(ib_cm_acl_clean);
+
+int ib_cm_register_acl(struct ib_cm_acl *acl, struct ib_cm_dpp *dpp)
+{
+	struct ib_cm_dpp_acl *dpp_acl;
+	unsigned long flags;
+
+	dpp_acl = kzalloc(sizeof(*dpp_acl), GFP_KERNEL);
+	if (!dpp_acl)
+		return -ENOMEM;
+
+	ib_cm_dpp_dbg("Registering ACL", dpp);
+
+	ib_cm_dpp_copy(&dpp_acl->dpp, dpp);
+	dpp_acl->acl = acl;
+
+	/* cm.lock serializes access to the global dpp_acl_map list */
+	spin_lock_irqsave(&cm.lock, flags);
+	list_add(&dpp_acl->list, &cm.dpp_acl_map);
+	spin_unlock_irqrestore(&cm.lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_cm_register_acl);
+
+struct ib_cm_acl *ib_cm_dpp_acl_lookup(struct ib_cm_dpp *dpp)
+{
+	struct ib_cm_dpp_acl *dpp_acl;
+	struct ib_cm_acl *acl = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cm.lock, flags);
+	list_for_each_entry(dpp_acl, &cm.dpp_acl_map, list) {
+		if (ib_cm_dpp_compare(&dpp_acl->dpp, dpp)) {
+			acl = dpp_acl->acl;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cm.lock, flags);
+
+	return acl;
+}
+EXPORT_SYMBOL(ib_cm_dpp_acl_lookup);
+
+void ib_cm_unregister_acl(struct ib_cm_acl *acl)
+{
+	struct ib_cm_dpp_acl *dpp_acl, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cm.lock, flags);
+	list_for_each_entry_safe(dpp_acl, tmp, &cm.dpp_acl_map, list) {
+		if (dpp_acl->acl == acl) {
+			ib_cm_dpp_dbg("Unregistering ACL", &dpp_acl->dpp);
+			list_del(&dpp_acl->list);
+			kfree(dpp_acl);
+		}
+	}
+	spin_unlock_irqrestore(&cm.lock, flags);
+}
+EXPORT_SYMBOL(ib_cm_unregister_acl);
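+
+/*
+ * Lifecycle sketch: a consumer registers its ACL under the
+ * (device, port, pkey) tuple it serves so the CM core can look the
+ * ACL up again for incoming connections, and unregisters it on
+ * teardown; see ipoib_init_acl()/ipoib_clean_acl() for a real caller.
+ *
+ *	struct ib_cm_dpp dpp;
+ *
+ *	ib_cm_dpp_init(&dpp, device, port, pkey);
+ *	ib_cm_register_acl(acl, &dpp);
+ *	...
+ *	acl = ib_cm_dpp_acl_lookup(&dpp);
+ *	...
+ *	ib_cm_unregister_acl(acl);
+ */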
+
+static void ib_cm_dpp_acl_cleanup(void)
+{
+	struct ib_cm_dpp_acl *dpp_acl, *tmp;
+
+	/* Use the _safe variant: entries are freed while walking the list */
+	list_for_each_entry_safe(dpp_acl, tmp, &cm.dpp_acl_map, list) {
+		list_del(&dpp_acl->list);
+		kfree(dpp_acl);
+	}
+}
+
static inline int cm_convert_to_ms(int iba_time)
{
/* approximate conversion to ms from 4.096us x 2^iba_time */
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
+ INIT_LIST_HEAD(&cm.dpp_acl_map);
ret = class_register(&cm_class);
if (ret) {
{
struct cm_timewait_info *timewait_info, *tmp;
+ ib_cm_dpp_acl_cleanup();
+
spin_lock_irq(&cm.lock);
list_for_each_entry(timewait_info, &cm.timewait_list, list)
cancel_delayed_work(&timewait_info->work.work);
--- /dev/null
+/*
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipoib.h"
+
+void ipoib_init_acl(struct net_device *dev)
+{
+ struct ib_cm_dpp dpp;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ ipoib_dbg(priv, "Initializing ACL for device %s\n", dev->name);
+ ib_cm_acl_init(&priv->acl);
+ ib_cm_dpp_init(&dpp, priv->ca, priv->port, priv->pkey);
+	if (ib_cm_register_acl(&priv->acl, &dpp))
+		ipoib_warn(priv, "Failed to register ACL\n");
+}
+
+void ipoib_clean_acl(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	ipoib_dbg(priv, "Cleaning ACL for device %s\n", dev->name);
+ ib_cm_unregister_acl(&priv->acl);
+ ib_cm_acl_clean(&priv->acl);
+}
#if !defined(IB_CM_H)
#define IB_CM_H
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
+/**
+ * struct ib_cm_dpp - identifies a unique IB interface by its
+ * (device, port, pkey) tuple
+ */
+struct ib_cm_dpp {
+ struct ib_device *device;
+ u8 port;
+ u16 pkey;
+};
+
+static inline void ib_cm_dpp_init(struct ib_cm_dpp *dpp,
+ struct ib_device *device, u8 port, u16 pkey)
+{
+ dpp->device = device;
+ dpp->port = port;
+ dpp->pkey = pkey;
+}
+
+static inline void ib_cm_dpp_copy(struct ib_cm_dpp *ddpp,
+ struct ib_cm_dpp *sdpp)
+{
+ ddpp->device = sdpp->device;
+ ddpp->port = sdpp->port;
+ ddpp->pkey = sdpp->pkey;
+}
+
+static inline bool ib_cm_dpp_compare(struct ib_cm_dpp *dpp1,
+ struct ib_cm_dpp *dpp2)
+{
+ return ((dpp1->device == dpp2->device) && (dpp1->port == dpp2->port) &&
+ (dpp1->pkey == dpp2->pkey));
+}
+
+static inline void ib_cm_dpp_dbg(const char *msg, struct ib_cm_dpp *dpp)
+{
+	pr_debug("%s: %s, %d, 0x%x\n", msg, dpp->device->name, dpp->port,
+		 dpp->pkey);
+}
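+
+/*
+ * Example (sketch, hypothetical "dev"): two dpp instances compare
+ * equal iff they hold the same device, port and pkey:
+ *
+ *	struct ib_cm_dpp a, b;
+ *
+ *	ib_cm_dpp_init(&a, dev, 1, 0xffff);
+ *	ib_cm_dpp_copy(&b, &a);
+ *	WARN_ON(!ib_cm_dpp_compare(&a, &b));
+ */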
+
+#define UUID_SZ 64
+struct ib_cm_acl_elem {
+ struct rb_node node;
+ u64 guid;
+ u64 subnet_prefix;
+ u32 ip;
+ char uuid[UUID_SZ];
+ int ref_count;
+};
+
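+/**
+ * struct ib_cm_acl - an access control list of allowed remote peers
+ * @enabled:      whether ACL enforcement is active for this consumer
+ * @allowed_list: rb-tree of ib_cm_acl_elem, keyed by (subnet_prefix, guid)
+ * @list_count:   number of elements in @allowed_list
+ * @lock:         protects @allowed_list and @list_count
+ */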
+struct ib_cm_acl {
+ bool enabled;
+ struct rb_root allowed_list;
+ ssize_t list_count;
+ spinlock_t lock;
+};
+
+void ib_cm_acl_init(struct ib_cm_acl *acl);
+int ib_cm_acl_insert(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid, u32 ip,
+ const char *uuid);
+struct ib_cm_acl_elem *ib_cm_acl_lookup(struct ib_cm_acl *acl,
+ u64 subnet_prefix, u64 guid);
+struct ib_cm_acl_elem *ib_cm_acl_lookup_uuid_ip(struct ib_cm_acl *acl,
+						const char *uuid, u32 ip);
+int ib_cm_acl_delete(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid);
+void ib_cm_acl_scan(struct ib_cm_acl *acl, struct ib_cm_acl_elem **list,
+ ssize_t *list_count);
+void ib_cm_acl_clean(struct ib_cm_acl *acl);
+
+/**
+ * struct ib_cm_dpp_acl - entry in the global dpp-to-ACL mapping table
+ */
+struct ib_cm_dpp_acl {
+ struct list_head list;
+ struct ib_cm_dpp dpp;
+ struct ib_cm_acl *acl;
+};
+
+int ib_cm_register_acl(struct ib_cm_acl *acl, struct ib_cm_dpp *dpp);
+struct ib_cm_acl *ib_cm_dpp_acl_lookup(struct ib_cm_dpp *dpp);
+void ib_cm_unregister_acl(struct ib_cm_acl *acl);
+
/**
* ib_cm_handler - User-defined callback to process communication events.
* @cm_id: Communication identifier associated with the reported event.