struct list_head                ls_busylist;
        struct list_head                assoc_list;
        struct ida                      assoc_cnt;
-       struct nvmet_port               *port;
+       struct nvmet_fc_port_entry      *pe;
        struct kref                     ref;
        u32                             max_sg_cnt;
 };
 
+/*
+ * Binds an nvmet layer port to an FC targetport, keyed by the
+ * targetport's WWNN/WWPN so the binding can be re-established
+ * across an LLDD targetport unregister/re-register cycle.
+ */
+struct nvmet_fc_port_entry {
+       /* bound targetport; NULL while the targetport is unregistered */
+       struct nvmet_fc_tgtport         *tgtport;
+       /* the nvmet layer port this entry represents */
+       struct nvmet_port               *port;
+       /* wwn's copied from the targetport at bind time */
+       u64                             node_name;
+       u64                             port_name;
+       /* entry on the global nvmet_fc_portentry_list */
+       struct list_head                pe_list;
+};
+
 struct nvmet_fc_defer_fcp_req {
        struct list_head                req_list;
        struct nvmefc_tgt_fcp_req       *fcp_req;
        atomic_t                        zrspcnt;
        atomic_t                        rsn;
        spinlock_t                      qlock;
-       struct nvmet_port               *port;
        struct nvmet_cq                 nvme_cq;
        struct nvmet_sq                 nvme_sq;
        struct nvmet_fc_tgt_assoc       *assoc;
 
 static LIST_HEAD(nvmet_fc_target_list);
 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+static LIST_HEAD(nvmet_fc_portentry_list);
 
 
 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
        queue->qid = qid;
        queue->sqsize = sqsize;
        queue->assoc = assoc;
-       queue->port = assoc->tgtport->port;
        queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
        INIT_LIST_HEAD(&queue->fod_list);
        INIT_LIST_HEAD(&queue->avail_defer_list);
        return ret;
 }
 
+/*
+ * Ties a port_entry to both a targetport and an nvmet port,
+ * records the targetport's wwn's, and puts the entry on the
+ * global port_entry list so a later re-registration of the same
+ * wwn's can be rebound (see nvmet_fc_portentry_rebind_tgt()).
+ */
+static void
+nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
+                       struct nvmet_fc_port_entry *pe,
+                       struct nvmet_port *port)
+{
+       lockdep_assert_held(&nvmet_fc_tgtlock);
+
+       /* two-way link: targetport <-> port_entry */
+       pe->tgtport = tgtport;
+       tgtport->pe = pe;
+
+       /* two-way link: nvmet port <-> port_entry */
+       pe->port = port;
+       port->priv = pe;
+
+       pe->node_name = tgtport->fc_target_port.node_name;
+       pe->port_name = tgtport->fc_target_port.port_name;
+       /* NOTE(review): INIT_LIST_HEAD is redundant immediately before
+        * list_add_tail(), which overwrites both list pointers.
+        */
+       INIT_LIST_HEAD(&pe->pe_list);
+
+       list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
+}
+
+/*
+ * Severs the targetport link, if one is still bound, and removes
+ * the port_entry from the global list. Called when the nvmet port
+ * is removed; freeing the entry is left to the caller.
+ */
+static void
+nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+       if (pe->tgtport)
+               pe->tgtport->pe = NULL;
+       list_del(&pe->pe_list);
+       spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a targetport deregisters. Breaks the relationship
+ * with the nvmet port, but leaves the port_entry in place so that
+ * re-registration can resume operation.
+ *
+ * Note: the entry stays on nvmet_fc_portentry_list so that
+ * nvmet_fc_portentry_rebind_tgt() can find it again by wwn's.
+ */
+static void
+nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+       struct nvmet_fc_port_entry *pe;
+       unsigned long flags;
+
+       spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+       pe = tgtport->pe;
+       if (pe)
+               pe->tgtport = NULL;
+       tgtport->pe = NULL;
+       spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a new targetport is registered. Looks in the
+ * existing nvmet port_entries to see if the nvmet layer is
+ * configured for the targetport's wwn's. (the targetport existed,
+ * nvmet configured, the lldd unregistered the tgtport, and is now
+ * reregistering the same targetport).  If so, bind the existing
+ * port entry to the new targetport.
+ */
+static void
+nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+       struct nvmet_fc_port_entry *pe;
+       unsigned long flags;
+
+       spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+       list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
+               if (tgtport->fc_target_port.node_name == pe->node_name &&
+                   tgtport->fc_target_port.port_name == pe->port_name) {
+                       /* a matching entry should never still be bound:
+                        * two live targetports with the same wwn's is bogus
+                        */
+                       WARN_ON(pe->tgtport);
+                       tgtport->pe = pe;
+                       pe->tgtport = tgtport;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
 
 /**
  * nvme_fc_register_targetport - transport entry point called by an
                goto out_free_newrec;
        }
 
+       nvmet_fc_portentry_rebind_tgt(newrec);
+
        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 {
        struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
 
+       nvmet_fc_portentry_unbind_tgt(tgtport);
+
        /* terminate any outstanding associations */
        __nvmet_fc_free_assocs(tgtport);
 
 
 
 /*
- * Actual processing routine for received FC-NVME LS Requests from the LLD
+ * Actual processing routine for received FC-NVME I/O Requests from the LLD
  */
 static void
 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
        u32 xfrlen = be32_to_cpu(cmdiu->data_len);
        int ret;
 
+       /*
+        * if there is no nvmet mapping to the targetport there
+        * shouldn't be requests. just terminate them.
+        */
+       if (!tgtport->pe)
+               goto transport_error;
+
        /*
         * Fused commands are currently not supported in the linux
         * implementation.
 
        fod->req.cmd = &fod->cmdiubuf.sqe;
        fod->req.rsp = &fod->rspiubuf.cqe;
-       fod->req.port = fod->queue->port;
+       fod->req.port = tgtport->pe->port;
 
        /* clear any response payload */
        memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
 nvmet_fc_add_port(struct nvmet_port *port)
 {
        struct nvmet_fc_tgtport *tgtport;
+       struct nvmet_fc_port_entry *pe;
        struct nvmet_fc_traddr traddr = { 0L, 0L };
        unsigned long flags;
        int ret;
        if (ret)
                return ret;
 
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return -ENOMEM;
+
        ret = -ENXIO;
        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
                if ((tgtport->fc_target_port.node_name == traddr.nn) &&
                    (tgtport->fc_target_port.port_name == traddr.pn)) {
-                       tgtport->port = port;
-                       ret = 0;
+                       /* a FC port can only be 1 nvmet port id */
+                       if (!tgtport->pe) {
+                               nvmet_fc_portentry_bind(tgtport, pe, port);
+                               ret = 0;
+                       } else
+                               ret = -EALREADY;
                        break;
                }
        }
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+       if (ret)
+               kfree(pe);
+
        return ret;
 }
 
 static void
 nvmet_fc_remove_port(struct nvmet_port *port)
 {
-       /* nothing to do */
+       /* port->priv was set by nvmet_fc_portentry_bind() in nvmet_fc_add_port() */
+       struct nvmet_fc_port_entry *pe = port->priv;
+
+       /* drop the targetport link and take the entry off the global list */
+       nvmet_fc_portentry_unbind(pe);
+
+       kfree(pe);
 }
 
 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {