*/
if (rq && !rq->is_srq
&& !test_bit(SIF_QPS_IN_RESET, &qp->persistent_state)) {
- if (sif_flush_rq(sdev, rq, qp, rq_len))
+ if (sif_flush_rq_wq(sdev, rq, qp, rq_len))
sif_log(sdev, SIF_INFO,
"failed to flush RQ %d", rq->index);
}
/* Owned by sif_r3.c - wa support */
struct sif_wa_stats wa_stats;
+ struct workqueue_struct *misc_wq; /* Used for deferred flushing of send/receive queues */
};
/* TBD: These should probably come from common pci headers
ibqp->event_handler(&ibe, ibqp->qp_context);
} else {
/* WA #622: if regular RQ, flush */
- if (sif_flush_rq(sdev, rq, qp, atomic_read(&rq_sw->length)))
+ if (sif_flush_rq_wq(sdev, rq, qp, atomic_read(&rq_sw->length)))
sif_log(sdev, SIF_INFO, "failed to flush RQ %d",
rq->index);
}
err = -ENOMEM;
goto wq_fail;
}
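+ /* Dedicated single-threaded workqueue so that deferred flush work items run serialized */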
+ sdev->misc_wq = create_singlethread_workqueue("sif_misc_wq");
+ if (!sdev->misc_wq) {
+ sif_log(sdev, SIF_INFO, "Failed to allocate sif misc work queue");
+ err = -ENOMEM;
+ destroy_workqueue(sdev->wq);
+ goto wq_fail;
+ }
err = sif_set_check_max_payload(sdev);
if (err)
pci_clear_master(dev);
pci_disable_device(dev);
flush_workqueue(sdev->wq);
+ flush_workqueue(sdev->misc_wq);
destroy_workqueue(sdev->wq);
+ destroy_workqueue(sdev->misc_wq);
sif_log(sdev, SIF_INFO, "removed device %s", sdev->ib_dev.name);
ib_dealloc_device(&sdev->ib_dev);
}
"flush requested for qp(type %s) with no rq defined",
string_enum_psif_qp_trans(qp->type));
} else {
- ret = sif_flush_rq(sdev, rq, qp, rq->entries);
+ ret = sif_flush_rq_wq(sdev, rq, qp, rq->entries);
if (ret)
sif_log(sdev, SIF_INFO, "failed to flush RQ %d", rq->index);
}
qp->ibqp.event_handler(&ibe, qp->ibqp.qp_context);
} else if (!rq->is_srq) {
/* WA #622: if regular RQ, flush */
- ret = sif_flush_rq(sdev, rq, qp, rq->entries);
+ ret = sif_flush_rq_wq(sdev, rq, qp, rq->entries);
if (ret) {
sif_log(sdev, SIF_INFO, "failed to flush RQ %d",
rq->index);
#include "sif_defs.h"
#include <linux/seq_file.h>
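+/* Work function executed from sdev->misc_wq; performs the actual RQ flush */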
+static void sif_flush_rq(struct work_struct *work);
+
int poll_wait_for_rq_writeback(struct sif_dev *sdev, struct sif_rq *rq)
{
unsigned long timeout = sdev->min_resp_ticks;
return n;
}
+
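+/* Schedule a deferred flush of @rq on sdev->misc_wq.
+ * The work item is allocated with GFP_ATOMIC since callers may run in
+ * atomic context, and is freed by sif_flush_rq() once it has executed.
+ */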
+int sif_flush_rq_wq(struct sif_dev *sdev, struct sif_rq *rq, struct sif_qp *target_qp,
+ int max_flushed_in_err)
+{
+ struct flush_rq_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ work->qp = target_qp;
+ work->sdev = sdev;
+ work->rq = rq;
+ work->entries = max_flushed_in_err;
+
+ INIT_WORK(&work->ws, sif_flush_rq);
+
+ queue_work(sdev->misc_wq, &work->ws);
+
+ return 0;
+}
+
/* Invalidate the RQ cache and flush a desired amount of
* the remaining entries in the given receive queue.
* @target_qp indicates the value of the local_qp field in the generated
* completion but is not interpreted by SIF in any way.
* Note: No locking of the RQ is necessary as there are multiple trigger points
* for flushing RQEs within the OFED verbs model.
*/
-int sif_flush_rq(struct sif_dev *sdev, struct sif_rq *rq, struct sif_qp *target_qp,
- int max_flushed_in_err)
+static void sif_flush_rq(struct work_struct *work)
{
int len, real_len;
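+ /* Unpack the flush arguments carried by the work item (see sif_flush_rq_wq) */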
+ struct flush_rq_work *rq_work = container_of(work, struct flush_rq_work, ws);
+ struct sif_dev *sdev = rq_work->sdev;
+ struct sif_qp *target_qp = rq_work->qp;
+ struct sif_rq *rq = rq_work->rq;
+ int max_flushed_in_err = rq_work->entries;
struct sif_rq_sw *rq_sw = get_sif_rq_sw(sdev, rq->index);
int ret = 0;
u32 head, tail;
*/
if (test_bit(FLUSH_RQ_IN_PROGRESS, &rq_sw->flags)) {
set_bit(FLUSH_RQ_IN_FLIGHT, &rq_sw->flags);
- return ret;
+ goto done;
}
/* if a race condition happened while trying to flush the RQ,
* set FLUSH_RQ_IN_FLIGHT
*/
if (test_and_set_bit(FLUSH_RQ_IN_PROGRESS, &rq_sw->flags)) {
set_bit(FLUSH_RQ_IN_FLIGHT, &rq_sw->flags);
- return ret;
+ goto done;
}
if (!sif_feature(disable_rq_flush))
}
error:
clear_bit(FLUSH_RQ_IN_PROGRESS, &rq_sw->flags);
- return ret = ret > 0 ? 0 : ret;
+done:
+ kfree(rq_work);
}
struct sif_mem *mem; /* Allocated queue memory */
};
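+/* Work item carrying the arguments of a deferred RQ flush;
+ * queued by sif_flush_rq_wq() and freed by the work function itself.
+ */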
+struct flush_rq_work {
+ struct work_struct ws;
+ struct sif_dev *sdev;
+ struct sif_rq *rq;
+ struct sif_qp *qp;
+ int entries;
+};
+
static inline struct sif_rq *to_srq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct sif_rq, ibsrq);
* @target_qp indicates the value of the local_qp field in the generated
* completion but is not interpreted by SIF in any way.
*/
-int sif_flush_rq(struct sif_dev *sdev, struct sif_rq *rq,
+int sif_flush_rq_wq(struct sif_dev *sdev, struct sif_rq *rq,
struct sif_qp *target_qp, int max_flushed_in_err);
int free_rq(struct sif_dev *sdev, int rq_idx);
/* WA #622, Check if QP in ERROR, flush RQ */
if (!rq->is_srq && qp->last_set_state == IB_QPS_ERR) {
- if (sif_flush_rq(sdev, rq, qp, atomic_read(&rq_sw->length)))
+ if (sif_flush_rq_wq(sdev, rq, qp, atomic_read(&rq_sw->length)))
sif_log(sdev, SIF_INFO, "failed to flush RQ %d", rq->index);
}