if (ret)
return ret;
+ /* Make sure the in-progress rq flush has
+ * completed before resetting the rq tail
+ * and head.
+ */
+ if (atomic_dec_and_test(&rq->flush_in_progress))
+ complete(&rq->can_reset);
+ wait_for_completion(&rq->can_reset);
+
/* Reset pointers */
memset(rq_sw, 0, sizeof(*rq_sw));
set_psif_rq_hw__head_indx(&rq->d, 0);
+
+ /* If the qp is reset, it can be reused again,
+ * so flush_in_progress must be re-armed.
+ * Thus, reset flush_in_progress to 1.
+ */
+ atomic_set(&rq->flush_in_progress, 1);
}
mb();
}
rq->sg_entries = sg_entries;
- init_completion(&rq->can_destroy);
+ init_completion(&rq->can_reset);
atomic_set(&rq->refcnt, 1);
+ atomic_set(&rq->flush_in_progress, 1);
/* Initialize hw part of descriptor */
memset(&lrq_hw, 0, sizeof(lrq_hw));
if (len == 0)
goto error;
- if (atomic_add_unless(&rq->refcnt, 1, 0)) {
+ if (atomic_add_unless(&rq->flush_in_progress, 1, 0)) {
sif_log(sdev, SIF_INFO_V, "flushing %d entries out of %d/%d entries remaining",
len, atomic_read(&rq_sw->length), rq->entries);
if (test_and_clear_bit(FLUSH_RQ_IN_FLIGHT, &rq_sw->flags))
goto flush_rq_again;
free_rq_error:
- if (atomic_dec_and_test(&rq->refcnt))
- complete(&rq->can_destroy);
+ if (atomic_dec_and_test(&rq->flush_in_progress))
+ complete(&rq->can_reset);
}
error:
clear_bit(FLUSH_RQ_IN_PROGRESS, &rq_sw->flags);
int free_rq(struct sif_dev *sdev, int rq_idx)
{
- struct sif_rq *rq;
+ struct sif_rq *rq = get_sif_rq(sdev, rq_idx);
+ struct sif_rq_sw *rq_sw = get_sif_rq_sw(sdev, rq_idx);
- rq = get_sif_rq(sdev, rq_idx);
sif_log(sdev, SIF_RQ, "entry %d", rq_idx);
- if (!rq->is_srq) {
- if (atomic_dec_and_test(&rq->refcnt))
- complete(&rq->can_destroy);
- wait_for_completion(&rq->can_destroy);
- goto clean_rq;
- }
-
if (!atomic_dec_and_test(&rq->refcnt)) {
sif_log(sdev, SIF_RQ, "rq %d still in use - ref.cnt %d",
rq_idx, atomic_read(&rq->refcnt));
return -EBUSY;
}
-clean_rq:
+ /* Reset rq pointers, for srq and the error path in create_qp.
+ * This also means that rq_sw will be reset twice in the
+ * happy path for !srq.
+ */
+ memset(rq_sw, 0, sizeof(*rq_sw));
+ set_psif_rq_hw__head_indx(&rq->d, 0);
+
sif_release_rq(sdev, rq->index);
return 0;
}
bool is_srq; /* Set if this is a shared receive queue */
int xrc_domain; /* If != 0: This is an XRC SRQ member of this domain idx */
atomic_t refcnt; /* Ref.count for usage as a shared receive queue */
- struct completion can_destroy; /* use refcnt to synchronization in !srq case */
+ atomic_t flush_in_progress; /* flush in progress synchronization */
+ struct completion can_reset; /* used with flush_in_progress to synchronize reset and flush */
u16 entries; /* Allocated entries */
u16 entries_user; /* Entries reported to user (entries -1 if max) */
u32 sg_entries; /* Max receive scatter/gather configured for this rq */