__flush_qp(qhp, rchp, schp, flag);
 }
 
-static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+                    struct c4iw_ep *ep)
 {
        struct fw_ri_wr *wqe;
        int ret;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
 
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-            qhp->ep->hwtid);
+            ep->hwtid);
 
        skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
-       set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(
                FW_WR_OP(FW_RI_INIT_WR) |
                FW_WR_COMPL(1));
        wqe->flowid_len16 = cpu_to_be32(
-               FW_WR_FLOWID(qhp->ep->hwtid) |
+               FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
        wqe->cookie = (u64)&wr_wait;
 
@@ ... @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                case C4IW_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
                        qhp->attr.state = C4IW_QP_STATE_CLOSING;
+                       ep = qhp->ep;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
-                               ep = qhp->ep;
                                c4iw_get_ep(&ep->com);
                        }
                        spin_unlock_irqrestore(&qhp->lock, flag);
-                       ret = rdma_fini(rhp, qhp);
+                       ret = rdma_fini(rhp, qhp, ep);
                        spin_lock_irqsave(&qhp->lock, flag);
                        if (ret) {
-                               ep = qhp->ep;
                                c4iw_get_ep(&ep->com);
                                disconnect = abort = 1;
                                goto err;
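
The change follows a standard pattern: snapshot qhp->ep while qhp->lock is
held and pin it with a reference, then use that snapshot across the
unlock/relock window instead of re-reading qhp->ep, which a racing thread
may clear. Below is a minimal userspace sketch of that pattern, not driver
code; all names (struct qp, struct ep, ep_get/ep_put, fini, close_qp) are
hypothetical stand-ins for the c4iw types.

/* sketch.c: illustrative only, not driver code; build with -pthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ep {
        atomic_int refcnt;
        unsigned int hwtid;
};

struct qp {
        pthread_mutex_t lock;
        struct ep *ep;          /* may be cleared by a racing thread */
};

static void ep_get(struct ep *ep)
{
        atomic_fetch_add(&ep->refcnt, 1);
}

static void ep_put(struct ep *ep)
{
        /* free on the last reference */
        if (atomic_fetch_sub(&ep->refcnt, 1) == 1)
                free(ep);
}

/* analogue of rdma_fini(): uses only the ep it was handed, never qp->ep */
static int fini(struct ep *ep)
{
        printf("fini tid %u\n", ep->hwtid);
        return 0;
}

static int close_qp(struct qp *qp)
{
        struct ep *ep;
        int ret;

        pthread_mutex_lock(&qp->lock);
        ep = qp->ep;            /* snapshot while the lock is held */
        ep_get(ep);             /* pin it across the unlock */
        pthread_mutex_unlock(&qp->lock);

        ret = fini(ep);         /* safe even if qp->ep is cleared now */

        pthread_mutex_lock(&qp->lock);
        /* error paths reuse the same snapshot instead of qp->ep */
        pthread_mutex_unlock(&qp->lock);

        ep_put(ep);
        return ret;
}

int main(void)
{
        struct qp qp = { .lock = PTHREAD_MUTEX_INITIALIZER, .ep = NULL };
        struct ep *ep = calloc(1, sizeof(*ep));

        atomic_init(&ep->refcnt, 1);
        ep->hwtid = 42;
        qp.ep = ep;

        close_qp(&qp);
        ep_put(ep);             /* drop the original reference */
        return 0;
}

In the patch itself the snapshot is taken right after entering the CLOSING
case, and c4iw_get_ep() pins it only on the paths that later disconnect or
abort.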