]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
IB/{cm,ipoib}: Manage ACL tables
authorYuval Shaia <yuval.shaia@oracle.com>
Mon, 4 Apr 2016 10:56:50 +0000 (13:56 +0300)
committerChuck Anderson <chuck.anderson@oracle.com>
Tue, 12 Jul 2016 19:46:15 +0000 (12:46 -0700)
Add support for ACL tables for ib_ipoib and ib_cm drivers.
ib_cm driver exposes functions to register and unregister tables and to
manage table contents.
In ib_ipoib driver add ACL object for each network device.

Orabug: 23222944

Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
drivers/infiniband/core/cm.c
drivers/infiniband/ulp/ipoib/Makefile
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_acl.c [new file with mode: 0644]
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
include/rdma/ib_cm.h

index c33f135a973b39edeba26f889d09b3fc10714d0b..63d7a7af32a78e260d8ad0b5d99837dcd1b6c4a2 100644 (file)
@@ -85,6 +85,7 @@ static struct ib_cm {
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
+       struct list_head dpp_acl_map;
 } cm;
 
 /* Counter indexes ordered by attribute ID */
@@ -752,6 +753,271 @@ static void cm_free_work(struct cm_work *work)
        kfree(work);
 }
 
+/**
+ * ib_cm_acl_init - Reset an ACL to an empty, disabled state.
+ * @acl: ACL to initialize.
+ *
+ * Must be called before any other ib_cm_acl_* function is used on @acl.
+ */
+void ib_cm_acl_init(struct ib_cm_acl *acl)
+{
+       acl->enabled = false;   /* 'enabled' is bool; use false, not 0 */
+       acl->allowed_list = RB_ROOT;
+       acl->list_count = 0;
+       spin_lock_init(&acl->lock);
+}
+EXPORT_SYMBOL(ib_cm_acl_init);
+
+/**
+ * ib_cm_acl_insert - Add a (subnet_prefix, guid) entry to an ACL.
+ * @acl: ACL to add to.
+ * @subnet_prefix: subnet prefix part of the key.
+ * @guid: GUID part of the key.
+ * @ip: IP address stored with the entry.
+ * @uuid: UUID_SZ bytes copied into the entry.
+ *
+ * If the key already exists, its reference count is incremented and the
+ * stored ip/uuid are left unchanged.
+ *
+ * Return: 0 on success (including the already-exists case), -ENOMEM on
+ * allocation failure.
+ */
+int ib_cm_acl_insert(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid, u32 ip,
+                    const char *uuid)
+{
+       struct ib_cm_acl_elem *elem;
+       struct rb_node **new, *parent = NULL;
+       int rc = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&acl->lock, flags);
+
+       new = &(acl->allowed_list.rb_node);
+
+       /* Tree is ordered by (subnet_prefix, guid). */
+       while (*new) {
+               elem = rb_entry(*new, struct ib_cm_acl_elem, node);
+
+               parent = *new;
+               if ((guid == elem->guid) &&
+                   (subnet_prefix == elem->subnet_prefix)) {
+                       /* Already present; just take another reference. */
+                       elem->ref_count++;
+                       goto out;
+               }
+
+               if (subnet_prefix == elem->subnet_prefix) {
+                       if (guid > elem->guid)
+                               new = &((*new)->rb_right);
+                       else
+                               new = &((*new)->rb_left);
+               } else if (subnet_prefix > elem->subnet_prefix) {
+                       new = &((*new)->rb_right);
+               } else {
+                       new = &((*new)->rb_left);
+               }
+       }
+
+       /* GFP_ATOMIC: allocating under a spinlock with IRQs disabled. */
+       elem = kmalloc(sizeof(*elem), GFP_ATOMIC);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto out;
+       }
+       elem->guid = guid;
+       elem->subnet_prefix = subnet_prefix;
+       elem->ip = ip;
+       memcpy(elem->uuid, uuid, UUID_SZ);
+       elem->ref_count = 1;
+       rb_link_node(&elem->node, parent, new);
+       rb_insert_color(&elem->node, &acl->allowed_list);
+       acl->list_count++;
+
+out:
+       spin_unlock_irqrestore(&acl->lock, flags);
+       return rc;
+}
+EXPORT_SYMBOL(ib_cm_acl_insert);
+
+/*
+ * Unlocked lookup helper: find the entry keyed by (subnet_prefix, guid).
+ * Caller must hold acl->lock.
+ *
+ * Return: the matching element, or NULL if not found.
+ */
+struct ib_cm_acl_elem *_ib_cm_acl_lookup(struct ib_cm_acl *acl,
+                                        u64 subnet_prefix, u64 guid)
+{
+       struct rb_node *node;
+       struct ib_cm_acl_elem *elem;
+
+       node = acl->allowed_list.rb_node;
+
+       while (node) {
+               elem = rb_entry(node, struct ib_cm_acl_elem, node);
+               if ((guid == elem->guid) &&
+                   (subnet_prefix == elem->subnet_prefix))
+                       return elem;
+
+               /* Same ordering as ib_cm_acl_insert: (subnet_prefix, guid). */
+               if (subnet_prefix == elem->subnet_prefix) {
+                       if (guid > elem->guid)
+                               node = node->rb_right;
+                       else
+                               node = node->rb_left;
+               } else if (subnet_prefix > elem->subnet_prefix) {
+                       node = node->rb_right;
+               } else {
+                       node = node->rb_left;
+               }
+       }
+
+       return NULL;
+}
+
+/**
+ * ib_cm_acl_lookup - Locked lookup of an ACL entry by subnet prefix and GUID.
+ *
+ * Return: the matching element, or NULL if not found.
+ *
+ * NOTE(review): the returned pointer is dereferenced after acl->lock is
+ * dropped; a concurrent ib_cm_acl_delete()/ib_cm_acl_clean() could free
+ * it — confirm callers serialize against removal.
+ */
+struct ib_cm_acl_elem *ib_cm_acl_lookup(struct ib_cm_acl *acl,
+                                       u64 subnet_prefix, u64 guid)
+{
+       struct ib_cm_acl_elem *elem;
+       unsigned long flags;
+
+       spin_lock_irqsave(&acl->lock, flags);
+       elem = _ib_cm_acl_lookup(acl, subnet_prefix, guid);
+       spin_unlock_irqrestore(&acl->lock, flags);
+
+       return elem;
+}
+EXPORT_SYMBOL(ib_cm_acl_lookup);
+
+/**
+ * ib_cm_acl_lookup_uuid_ip - Find an ACL entry by its (uuid, ip) pair.
+ *
+ * The tree is keyed by (subnet_prefix, guid), so this is a linear walk
+ * over all entries, not a tree search.
+ *
+ * Return: the matching element, or NULL if not found.
+ *
+ * NOTE(review): as with ib_cm_acl_lookup(), the result is returned after
+ * the lock is released — confirm callers serialize against removal.
+ */
+struct ib_cm_acl_elem *ib_cm_acl_lookup_uuid_ip(struct ib_cm_acl *acl,
+                                               char *uuid, u32 ip)
+{
+       struct ib_cm_acl_elem *elem, *ret = NULL;
+       struct rb_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&acl->lock, flags);
+       node = rb_first(&acl->allowed_list);
+       while (node) {
+               elem = container_of(node, struct ib_cm_acl_elem, node);
+               if ((ip == elem->ip) && (!memcmp(uuid, elem->uuid, UUID_SZ))) {
+                       ret = elem;
+                       goto out;
+               }
+               node = rb_next(node);
+       }
+
+out:
+       spin_unlock_irqrestore(&acl->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(ib_cm_acl_lookup_uuid_ip);
+
+/**
+ * ib_cm_acl_delete - Drop one reference to an ACL entry.
+ *
+ * The entry is erased and freed only when its reference count hits zero.
+ *
+ * Return: the remaining reference count, or 0 if the entry was freed or
+ * was not found (the two zero cases are indistinguishable to the caller).
+ */
+int ib_cm_acl_delete(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid)
+{
+       struct ib_cm_acl_elem *elem;
+       int ref_count = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&acl->lock, flags);
+       elem = _ib_cm_acl_lookup(acl, subnet_prefix, guid);
+       if (elem) {
+               elem->ref_count--;
+               ref_count = elem->ref_count;
+               if (elem->ref_count == 0) {
+                       rb_erase(&elem->node, &acl->allowed_list);
+                       kfree(elem);
+                       acl->list_count--;
+               }
+       }
+       spin_unlock_irqrestore(&acl->lock, flags);
+       return ref_count;
+}
+EXPORT_SYMBOL(ib_cm_acl_delete);
+
+/**
+ * ib_cm_acl_scan - Snapshot all ACL entries into a flat array.
+ * @acl: ACL to scan.
+ * @list: out: newly allocated array of copied elements; caller must
+ *        kfree() it. Set to NULL-equivalent failure state (with
+ *        *list_count = 0) if allocation fails.
+ * @list_count: out: number of elements copied.
+ *
+ * Elements are copied by value (the rb_node in each copy is meaningless),
+ * so the snapshot stays valid after the lock is dropped.
+ */
+void ib_cm_acl_scan(struct ib_cm_acl *acl, struct ib_cm_acl_elem **list,
+                   ssize_t *list_count)
+{
+       struct ib_cm_acl_elem *elem, *list_elem;
+       struct rb_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&acl->lock, flags);
+       /* GFP_ATOMIC because we allocate under the spinlock. */
+       *list = kmalloc_array(acl->list_count, sizeof(struct ib_cm_acl_elem),
+                             GFP_ATOMIC);
+       if (!*list) {
+               *list_count = 0;
+               spin_unlock_irqrestore(&acl->lock, flags);
+               pr_crit("Fail to allocate memory for acl_scan\n");
+               return;
+       }
+       list_elem = *list;
+       node = rb_first(&acl->allowed_list);
+       while (node) {
+               elem = container_of(node, struct ib_cm_acl_elem, node);
+               list_elem->guid = elem->guid;
+               list_elem->subnet_prefix = elem->subnet_prefix;
+               list_elem->ip = elem->ip;
+               memcpy(list_elem->uuid, elem->uuid, UUID_SZ);
+               list_elem->ref_count = elem->ref_count;
+               list_elem++;
+               node = rb_next(node);
+       }
+
+       *list_count = acl->list_count;
+       spin_unlock_irqrestore(&acl->lock, flags);
+}
+EXPORT_SYMBOL(ib_cm_acl_scan);
+
+/**
+ * ib_cm_acl_clean - Free every entry in the ACL, leaving it empty.
+ *
+ * Reference counts are ignored: all entries are removed unconditionally.
+ */
+void ib_cm_acl_clean(struct ib_cm_acl *acl)
+{
+       struct ib_cm_acl_elem *elem;
+       struct rb_node *node, *curr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&acl->lock, flags);
+       node = rb_first(&acl->allowed_list);
+       while (node) {
+               curr = node;
+               /* Advance before erasing: rb_next(curr) is invalid after rb_erase(). */
+               node = rb_next(node);
+               elem = container_of(curr, struct ib_cm_acl_elem, node);
+               rb_erase(curr, &acl->allowed_list);
+               kfree(elem);
+       }
+
+       acl->list_count = 0;
+       spin_unlock_irqrestore(&acl->lock, flags);
+}
+EXPORT_SYMBOL(ib_cm_acl_clean);
+
+/**
+ * ib_cm_register_acl - Bind an ACL to a (device, port, pkey) tuple.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ *
+ * NOTE(review): cm.dpp_acl_map is modified here without any lock, while
+ * ib_cm_dpp_acl_lookup() may walk it concurrently — confirm callers
+ * serialize registration, or protect the list (e.g. with cm.lock).
+ */
+int ib_cm_register_acl(struct ib_cm_acl *acl, struct ib_cm_dpp *dpp)
+{
+       struct ib_cm_dpp_acl *dpp_acl;
+
+       dpp_acl = kzalloc(sizeof(struct ib_cm_dpp_acl), GFP_KERNEL);
+       if (unlikely(!dpp_acl))
+               return -ENOMEM;
+
+       ib_cm_dpp_dbg("Registering ACL", dpp);
+
+       ib_cm_dpp_copy(&dpp_acl->dpp, dpp);
+       dpp_acl->acl = acl;
+       list_add(&dpp_acl->list, &cm.dpp_acl_map);
+
+       return 0;
+}
+EXPORT_SYMBOL(ib_cm_register_acl);
+
+/**
+ * ib_cm_dpp_acl_lookup - Find the ACL registered for a (device, port, pkey).
+ *
+ * Return: the registered ACL, or NULL if none matches.
+ *
+ * NOTE(review): walks cm.dpp_acl_map without a lock — racy against
+ * ib_cm_register_acl()/ib_cm_unregister_acl(); confirm serialization.
+ */
+struct ib_cm_acl *ib_cm_dpp_acl_lookup(struct ib_cm_dpp *dpp)
+{
+       struct ib_cm_dpp_acl *dpp_acl;
+
+       list_for_each_entry(dpp_acl, &cm.dpp_acl_map, list) {
+               if (ib_cm_dpp_compare(&dpp_acl->dpp, dpp))
+                       return dpp_acl->acl;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(ib_cm_dpp_acl_lookup);
+
+/**
+ * ib_cm_unregister_acl - Remove every dpp mapping that points at @acl.
+ *
+ * Uses the _safe iterator because entries are freed while walking.
+ *
+ * NOTE(review): modifies cm.dpp_acl_map without a lock — same race as
+ * ib_cm_register_acl(); confirm serialization.
+ */
+void ib_cm_unregister_acl(struct ib_cm_acl *acl)
+{
+       struct ib_cm_dpp_acl *dpp_acl, *tmp;
+
+       list_for_each_entry_safe(dpp_acl, tmp, &cm.dpp_acl_map, list) {
+               if (dpp_acl->acl == acl) {
+                       ib_cm_dpp_dbg("Unregistering ACL", &dpp_acl->dpp);
+                       list_del(&dpp_acl->list);
+                       kfree(dpp_acl);
+               }
+       }
+}
+EXPORT_SYMBOL(ib_cm_unregister_acl);
+
+/*
+ * Free all remaining dpp->acl mappings at module unload.
+ *
+ * Must use the _safe iterator: plain list_for_each_entry() reads
+ * dpp_acl->list.next after dpp_acl has been kfree()d (use-after-free).
+ */
+static void ib_cm_dpp_acl_cleanup(void)
+{
+       struct ib_cm_dpp_acl *dpp_acl, *tmp;
+
+       list_for_each_entry_safe(dpp_acl, tmp, &cm.dpp_acl_map, list)
+               kfree(dpp_acl);
+}
+
 static inline int cm_convert_to_ms(int iba_time)
 {
        /* approximate conversion to ms from 4.096us x 2^iba_time */
@@ -3908,6 +4174,7 @@ static int __init ib_cm_init(void)
        idr_init(&cm.local_id_table);
        get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
        INIT_LIST_HEAD(&cm.timewait_list);
+       INIT_LIST_HEAD(&cm.dpp_acl_map);
 
        ret = class_register(&cm_class);
        if (ret) {
@@ -3939,6 +4206,8 @@ static void __exit ib_cm_cleanup(void)
 {
        struct cm_timewait_info *timewait_info, *tmp;
 
+       ib_cm_dpp_acl_cleanup();
+
        spin_lock_irq(&cm.lock);
        list_for_each_entry(timewait_info, &cm.timewait_list, list)
                cancel_delayed_work(&timewait_info->work.work);
index e5430dd50764c9c6e13b871a51ee8c273a3f9941..a4145f2b31d9a084c2ae1d40e7be1965d02de4e5 100644 (file)
@@ -6,6 +6,7 @@ ib_ipoib-y                                      := ipoib_main.o \
                                                   ipoib_verbs.o \
                                                   ipoib_vlan.o \
                                                   ipoib_ethtool.o \
+                                                  ipoib_acl.o \
                                                   ipoib_netlink.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM)         += ipoib_cm.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG)      += ipoib_fs.o
index 4fa88783e8d40863fa0cd2169a3440c3cf84c2cf..1c254e6e15868c2c78d7b8ff7a5ab782a77c4e07 100644 (file)
@@ -51,6 +51,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
 #include <linux/sched.h>
 
 /* constants */
@@ -414,6 +415,7 @@ struct ipoib_dev_priv {
        unsigned max_send_sge;
        /* Device specific; obtained from query_device */
        unsigned max_sge;
+       struct ib_cm_acl acl;
 };
 
 struct ipoib_ah {
@@ -596,6 +598,8 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 /* We don't support UC connections at the moment */
 #define IPOIB_CM_SUPPORTED(ha)   (ha[0] & (IPOIB_FLAGS_RC))
 
+void ipoib_init_acl(struct net_device *dev);
+void ipoib_clean_acl(struct net_device *dev);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
 extern int ipoib_max_conn_qp;
@@ -777,6 +781,8 @@ static inline void ipoib_unregister_debugfs(void) { }
        printk(level "%s: " format, ipoib_dev_name(priv), ## arg)
 #define ipoib_warn(priv, format, arg...)               \
        ipoib_printk(KERN_WARNING, priv, format , ## arg)
+#define ipoib_err(priv, format, arg...)                \
+       ipoib_printk(KERN_ERR, priv, format, ## arg)
 
 #define ipoib_warn_ratelimited(priv, format, arg...) \
        pr_warn_ratelimited("%s: " format, ipoib_dev_name(priv), ## arg)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_acl.c b/drivers/infiniband/ulp/ipoib/ipoib_acl.c
new file mode 100644 (file)
index 0000000..8ff04a6
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipoib.h"
+
+/*
+ * Initialize this netdev's ACL and register it with ib_cm for the
+ * device's (ca, port, pkey) tuple.
+ */
+void ipoib_init_acl(struct net_device *dev)
+{
+       struct ib_cm_dpp dpp;
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       ipoib_dbg(priv, "Initializing ACL for device %s\n", dev->name);
+       ib_cm_acl_init(&priv->acl);
+       ib_cm_dpp_init(&dpp, priv->ca, priv->port, priv->pkey);
+       /* ib_cm_register_acl() can fail with -ENOMEM; don't ignore it. */
+       if (ib_cm_register_acl(&priv->acl, &dpp))
+               ipoib_warn(priv, "Fail to register ACL for device %s\n",
+                          dev->name);
+}
+
+/*
+ * Unregister this netdev's ACL from ib_cm and free all of its entries.
+ * Counterpart of ipoib_init_acl(); called from device cleanup paths.
+ */
+void ipoib_clean_acl(struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       ipoib_dbg(priv, "Clean ACL for device %s\n", dev->name);
+       ib_cm_unregister_acl(&priv->acl);
+       ib_cm_acl_clean(&priv->acl);
+}
index b594f5171255fbc4e779be2a67d36e5202ce6802..02e3cc88aea49af9aa2a4e09a343fccf8bbe0efa 100644 (file)
@@ -132,7 +132,7 @@ int ipoib_open(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-       ipoib_dbg(priv, "bringing up interface\n");
+       ipoib_dbg(priv, "bringing up interface %s\n", dev->name);
 
        netif_carrier_off(dev);
 
@@ -1394,6 +1394,7 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+               ipoib_clean_acl(cpriv->dev);
                /* Stop GC on child */
                set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
                cancel_delayed_work(&cpriv->neigh_reap_task);
@@ -1407,6 +1408,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
         */
        ipoib_neigh_hash_uninit(dev);
 
+       ipoib_clean_acl(priv->dev);
+
        ipoib_ib_dev_cleanup(dev);
 
        kfree(priv->rx_ring);
@@ -1857,6 +1860,8 @@ static struct net_device *ipoib_add_port(const char *format,
                goto event_failed;
        }
 
+       ipoib_init_acl(priv->dev);
+
        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
index 2dddd0c8b0b839521409fcf7ce3b4c8cff70969d..9467032976cb340428c118ef5c80059ce025756c 100644 (file)
@@ -79,6 +79,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
                goto err;
        }
 
+       ipoib_init_acl(priv->dev);
+
        result = register_netdevice(priv->dev);
        if (result) {
                ipoib_warn(priv, "failed to initialize; error %i", result);
index 39ed2d2fbd51452216586b031a3e25d236099169..7c06afdd01bfba51390448774591c6be7165dd8b 100644 (file)
@@ -35,6 +35,8 @@
 #if !defined(IB_CM_H)
 #define IB_CM_H
 
+#include <linux/spinlock.h>
+
 #include <rdma/ib_mad.h>
 #include <rdma/ib_sa.h>
 
@@ -275,6 +277,86 @@ struct ib_cm_event {
 #define CM_LAP_ATTR_ID         cpu_to_be16(0x0019)
 #define CM_APR_ATTR_ID         cpu_to_be16(0x001A)
 
+/**
+ * struct ib_cm_dpp - Unique IB interface identifier:
+ * a (device, port, pkey) tuple.
+ */
+struct ib_cm_dpp {
+       struct ib_device        *device;
+       u8                      port;
+       u16                     pkey;
+};
+
+/* Fill in a dpp tuple from its three components. */
+static inline void ib_cm_dpp_init(struct ib_cm_dpp *dpp,
+                                 struct ib_device *device, u8 port, u16 pkey)
+{
+       *dpp = (struct ib_cm_dpp) {
+               .device = device,
+               .port   = port,
+               .pkey   = pkey,
+       };
+}
+
+/* Copy one dpp tuple into another (plain member-wise copy). */
+static inline void ib_cm_dpp_copy(struct ib_cm_dpp *ddpp,
+                                 struct ib_cm_dpp *sdpp)
+{
+       *ddpp = *sdpp;
+}
+
+/* Two dpps are equal iff device, port and pkey all match. */
+static inline bool ib_cm_dpp_compare(struct ib_cm_dpp *dpp1,
+                                    struct ib_cm_dpp *dpp2)
+{
+       if (dpp1->device != dpp2->device)
+               return false;
+       if (dpp1->port != dpp2->port)
+               return false;
+       return dpp1->pkey == dpp2->pkey;
+}
+
+/* Debug helper: print "<msg>: <device name>, <port>, 0x<pkey>". */
+static inline void ib_cm_dpp_dbg(char *msg, struct ib_cm_dpp *dpp)
+{
+       pr_debug("%s: %s, %d, 0x%x\n", msg, dpp->device->name, dpp->port,
+                dpp->pkey);
+}
+
+#define UUID_SZ 64
+/* One allowed peer; keyed by (subnet_prefix, guid) in the ACL rb-tree. */
+struct ib_cm_acl_elem {
+       struct rb_node  node;
+       u64             guid;
+       u64             subnet_prefix;
+       u32             ip;     /* NOTE(review): presumably an IPv4 address; byte order not shown here — confirm */
+       char            uuid[UUID_SZ];
+       int             ref_count;      /* number of ib_cm_acl_insert() calls for this key */
+};
+
+/* ACL: rb-tree of allowed peers, guarded by 'lock'. */
+struct ib_cm_acl {
+       bool            enabled;
+       struct rb_root  allowed_list;   /* ordered by (subnet_prefix, guid) */
+       ssize_t         list_count;     /* number of elements in allowed_list */
+       spinlock_t      lock;
+};
+
+void ib_cm_acl_init(struct ib_cm_acl *acl);
+int ib_cm_acl_insert(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid, u32 ip,
+                    const char *uuid);
+struct ib_cm_acl_elem *ib_cm_acl_lookup(struct ib_cm_acl *acl,
+                                       u64 subnet_prefix, u64 guid);
+struct ib_cm_acl_elem *ib_cm_acl_lookup_uuid_ip(struct ib_cm_acl *acl,
+                                               char *uuid, u32 ip);
+int ib_cm_acl_delete(struct ib_cm_acl *acl, u64 subnet_prefix, u64 guid);
+void ib_cm_acl_scan(struct ib_cm_acl *acl, struct ib_cm_acl_elem **list,
+                   ssize_t *list_count);
+void ib_cm_acl_clean(struct ib_cm_acl *acl);
+
+/**
+ * This table maps a dpp (device, port, pkey) to its ACL.
+ */
+struct ib_cm_dpp_acl {
+       struct list_head        list;   /* linked on cm.dpp_acl_map */
+       struct ib_cm_dpp        dpp;
+       struct ib_cm_acl        *acl;
+};
+
+int ib_cm_register_acl(struct ib_cm_acl *acl, struct ib_cm_dpp *dpp);
+struct ib_cm_acl *ib_cm_dpp_acl_lookup(struct ib_cm_dpp *dpp);
+void ib_cm_unregister_acl(struct ib_cm_acl *acl);
+
 /**
  * ib_cm_handler - User-defined callback to process communication events.
  * @cm_id: Communication identifier associated with the reported event.