/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
 
+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+       hfi1_do_send(qp, false);
+}
+
 void _hfi1_do_send(struct work_struct *work)
 {
        struct iowait *wait = container_of(work, struct iowait, iowork);
        struct rvt_qp *qp = iowait_to_qp(wait);
 
-       hfi1_do_send(qp);
+       hfi1_do_send(qp, true);
 }
 
 /**
  * hfi1_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
+ * @qp: a pointer to the QP
+ * @in_thread: true if in a workqueue thread
  *
  * Process entries in the send work queue until credit or queue is
  * exhausted.  Only allow one CPU to send a packet per QP.
  * Otherwise, two threads could send packets out of order.
  */
-void hfi1_do_send(struct rvt_qp *qp)
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
 {
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
                        qp->s_hdrwords = 0;
                        /* allow other tasks to run */
                        if (unlikely(time_after(jiffies, timeout))) {
-                               if (workqueue_congested(cpu,
-                                                       ps.ppd->hfi1_wq)) {
+                               if (!in_thread ||
+                                   workqueue_congested(cpu, ps.ppd->hfi1_wq)) {
                                        spin_lock_irqsave(
                                                &qp->s_lock,
                                                ps.flags);
                                                *ps.ppd->dd->send_schedule);
                                        return;
                                }
-                               if (!irqs_disabled()) {
-                                       cond_resched();
-                                       this_cpu_inc(
-                                          *ps.ppd->dd->send_schedule);
-                               }
+                               cond_resched();
+                               this_cpu_inc(*ps.ppd->dd->send_schedule);
                                timeout = jiffies + (timeout_int) / 8;
                        }
                        spin_lock_irqsave(&qp->s_lock, ps.flags);
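
The !in_thread test above is the heart of the change.  Below is a minimal
sketch of that decision in isolation; the helper name is hypothetical and
does not exist in the driver, and only <linux/workqueue.h> is assumed:

#include <linux/workqueue.h>

/*
 * Hypothetical helper, for illustration only: after the send-loop timeout
 * fires, may we call cond_resched() and keep sending inline?
 */
static bool hfi1_may_resched_inline(bool in_thread, int cpu,
				    struct workqueue_struct *wq)
{
	/*
	 * in_thread == false: entered directly from rdmavt through
	 * hfi1_do_send_from_rvt(), where the caller's context may not
	 * allow sleeping, so the QP is handed back to hfi1_wq instead.
	 *
	 * in_thread == true: already running in a workqueue worker, so a
	 * voluntary reschedule is safe while the workqueue is not
	 * congested on this CPU.
	 */
	return in_thread && !workqueue_congested(cpu, wq);
}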
 
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
        dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
        dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
        dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
-       dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+       dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
        dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
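
hfi1_do_send_from_rvt() is registered above instead of hfi1_do_send() because
rdmavt's do_send callback is assumed to still take only the QP; a sketch of
the shape being matched (abbreviated, not the real struct rvt_driver_provided
from rdma_vt.h):

struct rvt_qp;

struct rvt_driver_provided_sketch {
	/* rdmavt passes only the QP, so the wrapper supplies in_thread */
	void (*do_send)(struct rvt_qp *qp);
};

Calls arriving through this pointer come from rdmavt's post-send path rather
than from hfi1_wq, which is why the wrapper passes in_thread = false.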
 
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
 
 void _hfi1_do_send(struct work_struct *work);
 
-void hfi1_do_send(struct rvt_qp *qp);
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
 
 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                        enum ib_wc_status status);