From 7b4fd805d064215a42b7fb8906121a0468818ff0 Mon Sep 17 00:00:00 2001 From: Wei Lin Guay Date: Tue, 5 Jul 2016 13:51:37 +0200 Subject: [PATCH] sif: rq: Do not clear the rq_sw until the completion of flush_rq Orabug: 23754857 The rq can be invalidated from reset_qp or flush_rq. Nevertheless, the rq_sw data structure has been reset after rq is invalidated in reset_qp regardless of the completion of the flush_rq. Thus, move the rq synchronization to reset_qp, and place the synchronization between reset_qp and flush_rq. After invalidating and resetting the rq, no flush rq is required as both head and tail have been reset to 0. This commit creates another atomic_t variable for the synchronization between reset rq and flush_rq. Signed-off-by: Wei Lin Guay Reviewed-by: Knut Omang --- drivers/infiniband/hw/sif/sif_qp.c | 14 ++++++++++++++ drivers/infiniband/hw/sif/sif_rq.c | 28 ++++++++++++++-------------- drivers/infiniband/hw/sif/sif_rq.h | 3 ++- 3 files changed, 30 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/sif/sif_qp.c b/drivers/infiniband/hw/sif/sif_qp.c index 53b578e092e8..41ef47699f94 100644 --- a/drivers/infiniband/hw/sif/sif_qp.c +++ b/drivers/infiniband/hw/sif/sif_qp.c @@ -2347,9 +2347,23 @@ failed: if (ret) return ret; + /* Make sure the in-progress rq flush has + * completed before resetting the rq tail + * and head. + */ + if (atomic_dec_and_test(&rq->flush_in_progress)) + complete(&rq->can_reset); + wait_for_completion(&rq->can_reset); + /* Reset pointers */ memset(rq_sw, 0, sizeof(*rq_sw)); set_psif_rq_hw__head_indx(&rq->d, 0); + + /* reset the flush_in_progress, if the qp is reset + * and the qp can be reused again. + * Thus, reset the flush_in_progress to 1. 
+ */ + atomic_set(&rq->flush_in_progress, 1); } mb(); diff --git a/drivers/infiniband/hw/sif/sif_rq.c b/drivers/infiniband/hw/sif/sif_rq.c index e3154ae35319..0ff96a96866c 100644 --- a/drivers/infiniband/hw/sif/sif_rq.c +++ b/drivers/infiniband/hw/sif/sif_rq.c @@ -153,8 +153,9 @@ int alloc_rq(struct sif_dev *sdev, struct sif_pd *pd, } rq->sg_entries = sg_entries; - init_completion(&rq->can_destroy); + init_completion(&rq->can_reset); atomic_set(&rq->refcnt, 1); + atomic_set(&rq->flush_in_progress, 1); /* Initialize hw part of descriptor */ memset(&lrq_hw, 0, sizeof(lrq_hw)); @@ -359,7 +360,7 @@ static void sif_flush_rq(struct work_struct *work) if (len == 0) goto error; - if (atomic_add_unless(&rq->refcnt, 1, 0)) { + if (atomic_add_unless(&rq->flush_in_progress, 1, 0)) { sif_log(sdev, SIF_INFO_V, "flushing %d entries out of %d/%d entries remaining", len, atomic_read(&rq_sw->length), rq->entries); @@ -569,8 +570,8 @@ flush_rq_again: if (test_and_clear_bit(FLUSH_RQ_IN_FLIGHT, &rq_sw->flags)) goto flush_rq_again; free_rq_error: - if (atomic_dec_and_test(&rq->refcnt)) - complete(&rq->can_destroy); + if (atomic_dec_and_test(&rq->flush_in_progress)) + complete(&rq->can_reset); } error: clear_bit(FLUSH_RQ_IN_PROGRESS, &rq_sw->flags); @@ -581,25 +582,24 @@ done: int free_rq(struct sif_dev *sdev, int rq_idx) { - struct sif_rq *rq; + struct sif_rq *rq = get_sif_rq(sdev, rq_idx); + struct sif_rq_sw *rq_sw = get_sif_rq_sw(sdev, rq_idx); - rq = get_sif_rq(sdev, rq_idx); sif_log(sdev, SIF_RQ, "entry %d", rq_idx); - if (!rq->is_srq) { - if (atomic_dec_and_test(&rq->refcnt)) - complete(&rq->can_destroy); - wait_for_completion(&rq->can_destroy); - goto clean_rq; - } - if (!atomic_dec_and_test(&rq->refcnt)) { sif_log(sdev, SIF_RQ, "rq %d still in use - ref.cnt %d", rq_idx, atomic_read(&rq->refcnt)); return -EBUSY; } -clean_rq: + /* Reset rq pointers, for srq and the error path in create_qp. + * This also means that rq_sw will be reset twice in the + * happy path for !srq. 
+ */ + memset(rq_sw, 0, sizeof(*rq_sw)); + set_psif_rq_hw__head_indx(&rq->d, 0); + sif_release_rq(sdev, rq->index); return 0; } diff --git a/drivers/infiniband/hw/sif/sif_rq.h b/drivers/infiniband/hw/sif/sif_rq.h index f7aea2cb5a18..70c83cd24fbd 100644 --- a/drivers/infiniband/hw/sif/sif_rq.h +++ b/drivers/infiniband/hw/sif/sif_rq.h @@ -26,7 +26,8 @@ struct sif_rq { bool is_srq; /* Set if this is a shared receive queue */ int xrc_domain; /* If != 0: This is an XRC SRQ member of this domain idx */ atomic_t refcnt; /* Ref.count for usage as a shared receive queue */ - struct completion can_destroy; /* use refcnt to synchronization in !srq case */ + atomic_t flush_in_progress; /* flush in progress synchronization */ + struct completion can_reset; /* use flush_in_progress to synchronize reset and flush */ u16 entries; /* Allocated entries */ u16 entries_user; /* Entries reported to user (entries -1 if max) */ u32 sg_entries; /* Max receive scatter/gather configured for this rq */ -- 2.50.1