void retransmit_timer(struct timer_list *t)
 {
        struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+       unsigned long flags;
 
        rxe_dbg_qp(qp, "retransmit timer fired\n");
 
-       spin_lock_bh(&qp->state_lock);
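+       /* note: unlike the _bh variant, irqsave/irqrestore saves and
+        * restores the caller's interrupt state, so qp->state_lock is
+        * safe to take from any context
+        */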
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->valid) {
                qp->comp.timeout = 1;
                rxe_sched_task(&qp->comp.task);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 
 static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
                if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
                        qp->attr.sq_draining = 0;
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
 
                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;
                        return;
                }
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static inline enum comp_state complete_ack(struct rxe_qp *qp,
  */
 static void reset_retry_timer(struct rxe_qp *qp)
 {
+       unsigned long flags;
+
        if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
-               spin_lock_bh(&qp->state_lock);
+               spin_lock_irqsave(&qp->state_lock, flags);
                if (qp_state(qp) >= IB_QPS_RTS &&
                    psn_compare(qp->req.psn, qp->comp.psn) > 0)
                        mod_timer(&qp->retrans_timer,
                                  jiffies + qp->qp_timeout_jiffies);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
        }
 }
 
        struct rxe_pkt_info *pkt = NULL;
        enum comp_state state;
        int ret;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
                drain_resp_pkts(qp);
                flush_send_queue(qp, notify);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (qp->comp.timeout) {
                qp->comp.timeout_retry = 1;
 
        int err;
        int is_request = pkt->mask & RXE_REQ_MASK;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
            (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
                goto drop;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        rxe_icrc_generate(skb, pkt);
 
 
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+       unsigned long flags;
 
        rxe_get(pd);
        rxe_get(rcq);
        if (err)
                goto err2;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return 0;
 
 /* move the qp to the error state */
 void rxe_qp_error(struct rxe_qp *qp)
 {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_ERR;
 
        /* drain work and packet queues */
        rxe_sched_task(&qp->resp.task);
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
                       int mask)
 {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.sq_draining = 1;
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 /* caller should hold qp->state_lock */
                qp->attr.cur_qp_state = attr->qp_state;
 
        if (mask & IB_QP_STATE) {
-               spin_lock_bh(&qp->state_lock);
+               unsigned long flags;
+
+               spin_lock_irqsave(&qp->state_lock, flags);
                err = __qp_chk_state(qp, attr, mask);
                if (!err) {
                        qp->attr.qp_state = attr->qp_state;
                        rxe_dbg_qp(qp, "state -> %s\n",
                                        qps2str[attr->qp_state]);
                }
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
 
                if (err)
                        return err;
 /* called by the query qp verb */
 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 {
+       unsigned long flags;
+
        *attr = qp->attr;
 
        attr->rq_psn                            = qp->resp.psn;
        /* Applications that get this state typically spin on it.
         * Yield the processor
         */
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->attr.sq_draining) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                cond_resched();
        } else {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
        }
 
        return 0;
 static void rxe_qp_do_cleanup(struct work_struct *work)
 {
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->valid = 0;
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
        qp->qp_timeout_jiffies = 0;
 
        if (qp_type(qp) == IB_QPT_RC) {
 
                            struct rxe_qp *qp)
 {
        unsigned int pkt_type;
+       unsigned long flags;
 
        if (unlikely(!qp->valid))
                return -EINVAL;
                return -EINVAL;
        }
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (pkt->mask & RXE_REQ_MASK) {
                if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
                        return -EINVAL;
                }
        } else {
                if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
                        return -EINVAL;
                }
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return 0;
 }
 
 void rnr_nak_timer(struct timer_list *t)
 {
        struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+       unsigned long flags;
 
        rxe_dbg_qp(qp, "nak timer fired\n");
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->valid) {
                /* request a send queue retry */
                qp->req.need_retry = 1;
                qp->req.wait_for_rnr_timer = 0;
                rxe_sched_task(&qp->req.task);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void req_check_sq_drain_done(struct rxe_qp *qp)
        unsigned int index;
        unsigned int cons;
        struct rxe_send_wqe *wqe;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_SQD) {
                q = qp->sq.queue;
                index = qp->req.wqe_index;
                                break;
 
                        qp->attr.sq_draining = 0;
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
 
                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;
                        return;
                } while (0);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 {
        struct rxe_send_wqe *wqe;
+       unsigned long flags;
 
        req_check_sq_drain_done(qp);
 
        if (wqe == NULL)
                return NULL;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
                     (wqe->state != wqe_state_processing))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                return NULL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
        struct rxe_queue *q = qp->sq.queue;
        struct rxe_ah *ah;
        struct rxe_av *av;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
 
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
                wqe = __req_next_wqe(qp);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                if (wqe)
                        goto err;
                else
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                qp->req.wait_for_rnr_timer = 0;
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        /* we come here if the retransmit timer has fired
         * or if the rnr timer has fired. If the retransmit
 
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+       unsigned long flags;
 
        if (!wqe)
                goto finish;
                return RESPST_ERR_CQ_OVERFLOW;
 
 finish:
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                return RESPST_CHK_RESOURCE;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (unlikely(!pkt))
                return RESPST_DONE;
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
                drain_req_pkts(qp);
                flush_recv_queue(qp, notify);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
 
 
        if (!err)
                rxe_sched_task(&qp->req.task);
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_ERR)
                rxe_sched_task(&qp->comp.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return err;
 }
 {
        struct rxe_qp *qp = to_rqp(ibqp);
        int err;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        /* caller has already called destroy_qp */
        if (WARN_ON_ONCE(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_err_qp(qp, "qp has been destroyed");
                return -EINVAL;
        }
 
        if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                *bad_wr = wr;
                rxe_err_qp(qp, "qp not ready to send");
                return -EINVAL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (qp->is_user) {
                /* Utilize process context to do protocol processing */
        struct rxe_rq *rq = &qp->rq;
        unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        /* caller has already called destroy_qp */
        if (WARN_ON_ONCE(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_err_qp(qp, "qp has been destroyed");
                return -EINVAL;
        }
 
        /* see C10-97.2.1 */
        if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                *bad_wr = wr;
                rxe_dbg_qp(qp, "qp not ready to post recv");
                return -EINVAL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (unlikely(qp->srq)) {
                *bad_wr = wr;
 
        spin_unlock_irqrestore(&rq->producer_lock, flags);
 
-       spin_lock_bh(&qp->state_lock);
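+       /* 'flags' declared above for the producer_lock is reused here;
+        * that lock has already been released at this point
+        */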
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_ERR)
                rxe_sched_task(&qp->resp.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return err;
 }