        bool pnetid_by_user;
        struct list_head lgr_list;
        spinlock_t lgr_lock;
+       u8 going_away : 1;      /* device about to be removed */
 };
 
 struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
                                 const struct smcd_ops *ops, int max_dmbs);
 
        return 0;
 }
 
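+/* mark all active devices as going away: stop using them for new connections */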
+static void smc_core_going_away(void)
+{
+       struct smc_ib_device *smcibdev;
+       struct smcd_dev *smcd;
+
+       spin_lock(&smc_ib_devices.lock);
+       list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
+               int i;
+
+               for (i = 0; i < SMC_MAX_PORTS; i++)
+                       set_bit(i, smcibdev->ports_going_away);
+       }
+       spin_unlock(&smc_ib_devices.lock);
+
+       spin_lock(&smcd_dev_list.lock);
+       list_for_each_entry(smcd, &smcd_dev_list.list, list)
+               smcd->going_away = 1;
+       spin_unlock(&smcd_dev_list.lock);
+}
+
 /* Called (from smc_exit) when module is removed */
 void smc_core_exit(void)
 {
        LIST_HEAD(lgr_freeing_list);
        struct smcd_dev *smcd;
 
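+       /* block new connections on all devices before freeing link groups */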
+       smc_core_going_away();
+
        spin_lock_bh(&smc_lgr_list.lock);
        list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
        spin_unlock_bh(&smc_lgr_list.lock);
 
        for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
                smc_ib_remember_port_attr(smcibdev, port_idx + 1);
                clear_bit(port_idx, &smcibdev->port_event_mask);
-               if (!smc_ib_port_active(smcibdev, port_idx + 1))
+               if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
+                       set_bit(port_idx, smcibdev->ports_going_away);
                        smc_port_terminate(smcibdev, port_idx + 1);
+               } else {
+                       clear_bit(port_idx, smcibdev->ports_going_away);
+               }
        }
 }
 
        switch (ibevent->event) {
        case IB_EVENT_DEVICE_FATAL:
                /* terminate all ports on device */
-               for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
+               for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
                        set_bit(port_idx, &smcibdev->port_event_mask);
+                       set_bit(port_idx, smcibdev->ports_going_away);
+               }
                schedule_work(&smcibdev->port_event_work);
                break;
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_PORT_ACTIVE:
        case IB_EVENT_GID_CHANGE:
                port_idx = ibevent->element.port_num - 1;
                if (port_idx < SMC_MAX_PORTS) {
                        set_bit(port_idx, &smcibdev->port_event_mask);
+                       if (ibevent->event == IB_EVENT_PORT_ERR)
+                               set_bit(port_idx, smcibdev->ports_going_away);
+                       else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
+                               clear_bit(port_idx, smcibdev->ports_going_away);
                        schedule_work(&smcibdev->port_event_work);
                }
                break;

        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_ACCESS_ERR:
                port_idx = ibevent->element.qp->port - 1;
                if (port_idx < SMC_MAX_PORTS) {
                        set_bit(port_idx, &smcibdev->port_event_mask);
+                       set_bit(port_idx, smcibdev->ports_going_away);
                        schedule_work(&smcibdev->port_event_work);
                }
                break;
 
        u8                      initialized : 1; /* ib dev CQ, evthdl done */
        struct work_struct      port_event_work;
        unsigned long           port_event_mask;
+       DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS); /* unusable for new conns */
 };
 
 struct smc_buf_desc;
 
        spin_lock(&smcd_dev_list.lock);
        list_del(&smcd->list);
        spin_unlock(&smcd_dev_list.lock);
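+       /* stop queueing new events before draining the event workqueue */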
+       smcd->going_away = 1;
        flush_workqueue(smcd->event_wq);
        destroy_workqueue(smcd->event_wq);
        smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
 void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
 {
        struct smc_ism_event_work *wrk;
 
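+       /* device is going away: drop the event */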
+       if (smcd->going_away)
+               return;
        /* copy event to event work queue, and let it be handled there */
        wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
        if (!wrk)
 
                        dev_put(ndev);
                        if (netdev == ndev &&
                            smc_ib_port_active(ibdev, i) &&
+                           !test_bit(i - 1, ibdev->ports_going_away) &&
                            !smc_ib_determine_gid(ibdev, i, ini->vlan_id,
                                                  ini->ib_gid, NULL)) {
                                ini->ib_dev = ibdev;

                        continue;
                        if (smc_pnet_match(ibdev->pnetid[i - 1], ndev_pnetid) &&
                            smc_ib_port_active(ibdev, i) &&
+                           !test_bit(i - 1, ibdev->ports_going_away) &&
                            !smc_ib_determine_gid(ibdev, i, ini->vlan_id,
                                                  ini->ib_gid, NULL)) {
                                ini->ib_dev = ibdev;
 
        spin_lock(&smcd_dev_list.lock);
        list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
-               if (smc_pnet_match(ismdev->pnetid, ndev_pnetid)) {
+               if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
+                   !ismdev->going_away) {
                        ini->ism_dev = ismdev;
                        break;
                }