www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
xsigo: Schedule while uninterruptible
authorPradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
Tue, 15 Nov 2016 22:54:07 +0000 (22:54 +0000)
committerChuck Anderson <chuck.anderson@oracle.com>
Fri, 20 Jan 2017 04:32:15 +0000 (20:32 -0800)
Orabug: 25097469

Fix the case where the uVNIC driver calls msleep while
holding a spinlock in case of path creation failure.

Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Pradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
Reviewed-by: sajid zia <szia@oracle.com>
drivers/infiniband/ulp/xsigo/xscore/Makefile
drivers/infiniband/ulp/xsigo/xsvhba/Makefile
drivers/infiniband/ulp/xsigo/xsvnic/Makefile
drivers/infiniband/ulp/xsigo/xve/Makefile
drivers/infiniband/ulp/xsigo/xve/xve.h
drivers/infiniband/ulp/xsigo/xve/xve_ib.c
drivers/infiniband/ulp/xsigo/xve/xve_main.c
drivers/infiniband/ulp/xsigo/xve/xve_tables.c

index fe29e002e3a54e8f2a8218590375111e3f9271e4..cdeb3f8981a4f5aef6f724d937d0d1eebd7680a8 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XSCORE) := xscore.o
 xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
            xscore_stats.o xscore_uadm.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8038\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
index 85b14c8dc5e536301aefaf699395dc7506ef4471..31e1bd98e0a68626e5e94083fbd39fddb4176255 100644 (file)
@@ -3,7 +3,7 @@ xsvhba-y := vhba_main.o vhba_xsmp.o vhba_create.o vhba_init.o vhba_delete.o \
            vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o        \
            vhba_scsi_intf.o vhba_align.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8038\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
index b5daeb1bdc388b2e362062acea98d7024c4146ef..3801f0c8c0f33a20df55d6bc5e7dbd7a79384615 100644 (file)
@@ -1,7 +1,7 @@
 obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
 xsvnic-y := xsvnic_main.o xsvnic_stats.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8038\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
index 892ce1385347b949429e13aab120731dbb56d7a9..b2a624319837394ec8964c56c55e8cf70a088176 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XVE) := xve.o
 xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
         xve_ethtool.o xve_cm.o xve_stats.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8038\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
index 03d8ea1c0796a260a08f5f8b57c239aaf621fd48..5c5a3cfb4a3abafece0a4addbe088bb6942ac39f 100644 (file)
@@ -1166,8 +1166,6 @@ void queue_sm_work(struct xve_dev_priv *priv, int msecs);
 void queue_age_work(struct xve_dev_priv *priv, int msecs);
 
 void xve_mark_paths_invalid(struct net_device *dev);
-void xve_flush_paths(struct net_device *dev);
-void xve_flush_single_path(struct net_device *dev, struct xve_path *path);
 void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid);
 struct xve_dev_priv *xve_intf_alloc(const char *format);
 
index f08c509df08fcfbbf07b5a142ca7cc9a3e82aed8..a5d333c21db3ee24cce36c8c01491d3f5be5a1f0 100644 (file)
@@ -1007,6 +1007,7 @@ int xve_ib_dev_up(struct net_device *dev)
 int xve_ib_dev_down(struct net_device *dev, int flush)
 {
        struct xve_dev_priv *priv = netdev_priv(dev);
+       struct xve_path *path, *tp;
 
        xve_debug(DEBUG_IBDEV_INFO, priv, "%s downing ib_dev\n", __func__);
        if (!test_and_clear_bit(XVE_FLAG_OPER_UP, &priv->flags)) {
@@ -1027,7 +1028,10 @@ int xve_ib_dev_down(struct net_device *dev, int flush)
        xve_mcast_stop_thread(dev, flush);
        xve_mcast_dev_flush(dev);
 
-       xve_flush_paths(dev);
+       /* Flush all Paths */
+       list_for_each_entry_safe(path, tp, &priv->path_list, list)
+               xve_flush_single_path_by_gid(dev, &path->pathrec.dgid);
+
 
        return 0;
 }
index a5a6e4b0019e1925d928ffd882e3bc42e214efc1..c9fc3bed22c65b3f03b5cb045fbbf14cf9122a89 100644 (file)
@@ -347,7 +347,7 @@ inline void xve_put_path(struct xve_path *path)
        atomic_dec_if_positive(&path->users);
 }
 
-inline void xve_free_path(struct xve_path *path, int do_lock)
+inline void xve_free_path(struct xve_path *path)
 {
        struct xve_dev_priv *priv;
        struct net_device *netdev;
@@ -362,20 +362,16 @@ inline void xve_free_path(struct xve_path *path, int do_lock)
        while ((skb = __skb_dequeue(&path->uplink_queue)))
                dev_kfree_skb_irq(skb);
 
-       if (do_lock)
-               spin_lock_irqsave(&priv->lock, flags);
-       if (xve_cmtx_get(path)) {
-               if (do_lock)
-                       spin_unlock_irqrestore(&priv->lock, flags);
+       netif_tx_lock_bh(netdev);
+       if (xve_cmtx_get(path))
                xve_cm_destroy_tx_deferred(xve_cmtx_get(path));
-               if (do_lock)
-                       spin_lock_irqsave(&priv->lock, flags);
-       }
+       netif_tx_unlock_bh(netdev);
+
+       spin_lock_irqsave(&priv->lock, flags);
        xve_flush_l2_entries(netdev, path);
        if (path->ah)
                xve_put_ah(path->ah);
-       if (do_lock)
-               spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
        kfree(path);
 }
@@ -487,16 +483,6 @@ void xve_mark_paths_invalid(struct net_device *dev)
        spin_unlock_irq(&priv->lock);
 }
 
-void xve_flush_paths(struct net_device *dev)
-{
-       struct xve_dev_priv *priv = netdev_priv(dev);
-       struct xve_path *path, *tp;
-
-       list_for_each_entry_safe(path, tp, &priv->path_list, list) {
-               xve_flush_single_path(dev, path);
-       }
-
-}
 
 void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid)
 {
@@ -533,9 +519,10 @@ void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid)
 
        wait_for_completion(&path->done);
        list_del(&path->list);
+
        /* Make sure path is not in use */
        if (atomic_dec_if_positive(&path->users) <= 0)
-               xve_free_path(path, 1);
+               xve_free_path(path);
        else {
                /* Wait for path->users to become zero */
                unsigned long begin = jiffies;
@@ -549,7 +536,7 @@ void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid)
                        msleep(20);
                }
                if (atomic_read(&path->users) == 0)
-                       xve_free_path(path, 1);
+                       xve_free_path(path);
 
        }
 timeout:
@@ -557,11 +544,6 @@ timeout:
 
 }
 
-void xve_flush_single_path(struct net_device *dev, struct xve_path *path)
-{
-       xve_flush_single_path_by_gid(dev, &path->pathrec.dgid);
-}
-
 static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec, void *path_ptr)
 {
@@ -790,17 +772,22 @@ xve_path_lookup(struct net_device *dev,
        if (!path)
                return NULL;
 
-       spin_lock_irqsave(&xve_fwt->lock, flags);
-       fwt_entry->path = path;
-       list_add_tail(&fwt_entry->list, &path->fwt_list);
-       spin_unlock_irqrestore(&xve_fwt->lock, flags);
        if (!path->ah) {
                if (!path->query && path_rec_start(dev, path)) {
-                       xve_free_path(path, 0);
+                       /*
+                        * Forwarding entry not yet added to the path fwt_list
+                        * just free that path
+                        */
+                       kfree(path);
                        return NULL;
                }
        }
 
+       spin_lock_irqsave(&xve_fwt->lock, flags);
+       fwt_entry->path = path;
+       list_add_tail(&fwt_entry->list, &path->fwt_list);
+       spin_unlock_irqrestore(&xve_fwt->lock, flags);
+
        return path;
 }
 
index 37e5db4896ee400ca9d7b6ffd722f56ac6df005c..f736bd6a1f5692a49a86eba73c1cc6fa70cce7f5 100644 (file)
@@ -368,7 +368,6 @@ void xve_fwt_entry_destroy(struct xve_dev_priv *priv,
                           struct xve_fwt_entry *fwt_entry)
 {
        xve_remove_fwt_entry(priv, fwt_entry);
-       /* Function gets cald with Lock held always */
        xve_fwt_entry_free(priv, fwt_entry);
 }