atomic_t cq_miss_occ; /* Global #times sif_poll_cq had to busy wait (updated by destroy_cq) */
struct sif_eps *es; /* State for the EPS comm (sif_epsc.h) */
struct sif_table ba[sif_tab_max]; /* Base address setup structures */
- struct sif_pqp **pqp; /* PSIF management QPs */
+ struct sif_pqp_info pqi; /* PSIF management QP infrastructure */
struct sif_cb **kernel_cb[2]; /* cb's for the kernel (bw and low latency per cpu) */
- int pqp_cnt; /* Number of PQPs set up */
- atomic_t next_pqp; /* Used for round robin assignment of pqp */
int kernel_cb_cnt[2]; /* Number of CBs set up for the kernel for each kind */
struct sif_idr xrcd_refs; /* Mgmt of sif_xrcd allocations */
struct sif_idr pd_refs; /* Mgmt of sif_pd allocations */
- struct sif_spqp_pool ki_spqp; /* Stencil PQPs for key invalidates */
/* Misc settings */
struct completion ready_for_events; /* Set when we are ready to receive events from sif */
bool registered; /* Set when we are registered with the verbs layer */
if (!spqp)
sif_log(sdev, SIF_PQPT,
"All %u configured stencil pqps busy, consider increasing ki_spqp_size",
- sdev->ki_spqp.pool_sz);
+ sdev->pqi.ki_s.pool_sz);
}
/* Check if we should do a brute-force flush of all MMU caches */
int ret = 0;
uint n_pqps = es->eqs.cnt - 2;
- sdev->pqp = sif_kmalloc(sdev, sizeof(struct sif_pqp *) * n_pqps, GFP_KERNEL | __GFP_ZERO);
- if (!sdev->pqp)
+ sdev->pqi.pqp = sif_kmalloc(sdev, sizeof(struct sif_pqp *) * n_pqps, GFP_KERNEL | __GFP_ZERO);
+ if (!sdev->pqi.pqp)
return -ENOMEM;
for (i = 0; i < n_pqps; i++) {
ret = PTR_ERR(pqp);
goto failed;
}
- sdev->pqp[i] = pqp;
+ sdev->pqi.pqp[i] = pqp;
}
- sdev->pqp_cnt = i;
- atomic_set(&sdev->next_pqp, 0);
+ sdev->pqi.cnt = i;
+ atomic_set(&sdev->pqi.next, 0);
return 0;
failed:
- sdev->pqp_cnt = i;
+ sdev->pqi.cnt = i;
sif_pqp_fini(sdev);
return ret;
}
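For concreteness, the sizing here works out as below; the "- 2" offset is inferred from get_pqp_same_eq() further down, which maps comp_vector - 2 to a PQP index:

/* Worked example (illustration only), assuming es->eqs.cnt == 6:
 *   n_pqps == 4, so pqi.pqp[0..3] serve completion vectors 2..5,
 *   and get_pqp_same_eq(v) picks pqi.pqp[(v - 2) % 4].
 */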
* during takedown as these operations themselves
* generate PQP requests.
*/
- while (sdev->pqp_cnt > 0) {
- int i = sdev->pqp_cnt - 1;
- struct sif_pqp *pqp = sdev->pqp[i];
+ while (sdev->pqi.cnt > 0) {
+ int i = sdev->pqi.cnt - 1;
+ struct sif_pqp *pqp = sdev->pqi.pqp[i];
if (i > 0) {
/* Remove ourselves first, except the final PQP */
- sdev->pqp[i] = NULL;
- sdev->pqp_cnt--;
+ sdev->pqi.pqp[i] = NULL;
+ sdev->pqi.cnt--;
}
sif_destroy_pqp(sdev, pqp);
if (i == 0)
- sdev->pqp_cnt--;
+ sdev->pqi.cnt--;
}
- kfree(sdev->pqp);
- sdev->pqp = NULL;
+ kfree(sdev->pqi.pqp);
+ sdev->pqi.pqp = NULL;
}
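To spell out the asymmetric loop above, here is an illustrative trace (inferred from the code, not part of the patch) for pqi.cnt == 3; destroying a PQP itself posts requests that must be served by a still-live PQP:

/* Teardown trace for pqi.cnt == 3:
 *   i == 2: pqi.pqp[2] = NULL, pqi.cnt = 2, destroy pqp[2]
 *   i == 1: pqi.pqp[1] = NULL, pqi.cnt = 1, destroy pqp[1]
 *   i == 0: destroy pqp[0] while it is still visible through
 *           pqi.pqp[0], so its own takedown requests can be posted;
 *           only then is pqi.cnt dropped to 0.
 */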
int n = max(sif_ki_spqp_size, 0U);
int bm_len = max(1, n/8); /* Bitmap length in longs - amply covers n stencil PQPs */
- mutex_init(&sdev->ki_spqp.lock);
- sdev->ki_spqp.spqp =
+ mutex_init(&sdev->pqi.ki_s.lock);
+ sdev->pqi.ki_s.spqp =
#ifdef CONFIG_NUMA
kmalloc_node(sizeof(struct sif_st_pqp *) * n, GFP_KERNEL | __GFP_ZERO,
sdev->pdev->dev.numa_node);
#else
kmalloc(sizeof(struct sif_st_pqp *) * n, GFP_KERNEL | __GFP_ZERO);
#endif
- if (!sdev->ki_spqp.spqp)
+ if (!sdev->pqi.ki_s.spqp)
return -ENOMEM;
- sdev->ki_spqp.bitmap =
+ sdev->pqi.ki_s.bitmap =
#ifdef CONFIG_NUMA
kmalloc_node(sizeof(ulong) * bm_len, GFP_KERNEL | __GFP_ZERO,
sdev->pdev->dev.numa_node);
#else
kmalloc(sizeof(ulong) * bm_len, GFP_KERNEL | __GFP_ZERO);
#endif
- if (!sdev->ki_spqp.bitmap) {
+ if (!sdev->pqi.ki_s.bitmap) {
ret = -ENOMEM;
goto bm_failed;
}
ret = PTR_ERR(spqp);
break;
}
- sdev->ki_spqp.spqp[i] = spqp;
+ sdev->pqi.ki_s.spqp[i] = spqp;
spqp->index = i;
}
- sdev->ki_spqp.pool_sz = i;
+ sdev->pqi.ki_s.pool_sz = i;
if (ret && i) {
sif_log(sdev, SIF_INFO, "Failed to create %d INVALIDATE_KEY stencil QPs", i);
sif_ki_spqp_fini(sdev);
}
sif_log(sdev, SIF_INIT, "Created %d INVALIDATE_KEY stencil QPs", i);
bm_failed:
if (ret)
- kfree(sdev->ki_spqp.spqp);
+ kfree(sdev->pqi.ki_s.spqp);
return 0; /* Never fail on stencil PQP allocation */
}
{
int i;
- if (!sdev->ki_spqp.spqp)
+ if (!sdev->pqi.ki_s.spqp)
return;
- for (i = sdev->ki_spqp.pool_sz - 1; i >= 0; i--)
- sif_destroy_st_pqp(sdev, sdev->ki_spqp.spqp[i]);
- kfree(sdev->ki_spqp.bitmap);
- kfree(sdev->ki_spqp.spqp);
- sdev->ki_spqp.spqp = NULL;
+ for (i = sdev->pqi.ki_s.pool_sz - 1; i >= 0; i--)
+ sif_destroy_st_pqp(sdev, sdev->pqi.ki_s.spqp[i]);
+ kfree(sdev->pqi.ki_s.bitmap);
+ kfree(sdev->pqi.ki_s.spqp);
+ sdev->pqi.ki_s.spqp = NULL;
}
{
int cpu;
- for (cpu = 0; cpu < sdev->pqp_cnt; cpu++)
- if (sdev->pqp[cpu])
- return sdev->pqp[cpu];
+ for (cpu = 0; cpu < sdev->pqi.cnt; cpu++)
+ if (sdev->pqi.pqp[cpu])
+ return sdev->pqi.pqp[cpu];
return NULL;
}
struct sif_pqp *get_pqp_same_eq(struct sif_dev *sdev, int comp_vector)
{
unsigned int pqp_index = comp_vector - 2; /* the first two EQs have no PQP associated */
- struct sif_pqp *pqp = sdev->pqp_cnt ? sdev->pqp[pqp_index % sdev->pqp_cnt] : NULL;
+ struct sif_pqp *pqp = sdev->pqi.cnt ? sdev->pqi.pqp[pqp_index % sdev->pqi.cnt] : NULL;
if (unlikely(!pqp)) {
/* Typically during take down */
struct sif_pqp *get_pqp(struct sif_dev *sdev)
{
unsigned int cpu = smp_processor_id();
- struct sif_pqp *pqp = sdev->pqp_cnt ? sdev->pqp[cpu % sdev->pqp_cnt] : NULL;
+ struct sif_pqp *pqp = sdev->pqi.cnt ? sdev->pqi.pqp[cpu % sdev->pqi.cnt] : NULL;
if (unlikely(!pqp)) {
/* Typically during take down */
struct sif_pqp *get_next_pqp(struct sif_dev *sdev)
{
struct sif_pqp *pqp;
- int next = atomic_inc_return(&sdev->next_pqp) % sdev->pqp_cnt;
+ int next = atomic_inc_return(&sdev->pqi.next) % sdev->pqi.cnt;
- pqp = sdev->pqp[next];
+ pqp = sdev->pqi.pqp[next];
if (unlikely(!pqp)) {
/* Typically during take down */
return find_any_pqp(sdev);
int index;
struct sif_st_pqp *spqp = NULL;
- mutex_lock(&sdev->ki_spqp.lock);
- index = find_next_zero_bit(sdev->ki_spqp.bitmap, sdev->ki_spqp.pool_sz, 0);
- if (index < sdev->ki_spqp.pool_sz) {
- set_bit(index, sdev->ki_spqp.bitmap);
- spqp = sdev->ki_spqp.spqp[index];
+ mutex_lock(&sdev->pqi.ki_s.lock);
+ index = find_next_zero_bit(sdev->pqi.ki_s.bitmap, sdev->pqi.ki_s.pool_sz, 0);
+ if (index < sdev->pqi.ki_s.pool_sz) {
+ set_bit(index, sdev->pqi.ki_s.bitmap);
+ spqp = sdev->pqi.ki_s.spqp[index];
}
- mutex_unlock(&sdev->ki_spqp.lock);
+ mutex_unlock(&sdev->pqi.ki_s.lock);
sif_log(sdev, SIF_PQPT, "bit index %d", index);
return spqp;
}
{
struct sif_dev *sdev = to_sdev(spqp->pqp.cq->ibcq.device);
- mutex_lock(&sdev->ki_spqp.lock);
- clear_bit(spqp->index, sdev->ki_spqp.bitmap);
- mutex_unlock(&sdev->ki_spqp.lock);
+ mutex_lock(&sdev->pqi.ki_s.lock);
+ clear_bit(spqp->index, sdev->pqi.ki_s.bitmap);
+ mutex_unlock(&sdev->pqi.ki_s.lock);
sif_log(sdev, SIF_PQPT, "bit index %d", spqp->index);
}
}
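Taken together, the two functions above form a checkout/checkin protocol for stencil PQPs. A minimal caller sketch; the names sif_alloc_ki_spqp()/sif_release_ki_spqp() are assumed from the surrounding declarations:

	struct sif_st_pqp *spqp = sif_alloc_ki_spqp(sdev);

	if (spqp) {
		/* ... post the pre-populated INVALIDATE_KEY request ... */
		sif_release_ki_spqp(spqp);
	} else {
		/* Pool exhausted (the "stencil pqps busy" log earlier):
		 * fall back to an ordinary PQP request.
		 */
	}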
+
+/* Per PQP state/configuration info */
struct sif_pqp {
struct sif_qp *qp; /* The qp used */
struct sif_cq *cq; /* Associated completion queue for this priv. QP */
u16 lowpri_lim; /* Max number of outstanding low priority reqs */
};
+/* Stencil PQP support - pre-populated PQPs for special performance-sensitive use cases */
+
+#define SPQP_DOORBELL_INTERVAL 8192
+
+struct sif_st_pqp {
+ struct sif_pqp pqp; /* The PQP to use - must be first */
+ struct sif_sq *sq; /* Short path to sq */
+ struct sif_sq_sw *sq_sw;/* Short path to sq_sw */
+ int index; /* The index of this st_pqp within its pool */
+ u16 doorbell_interval; /* Interval between each doorbell write */
+ u16 doorbell_seq; /* Seq.no to use in next doorbell */
+ u16 next_doorbell_seq; /* Next seqno to ring doorbell */
+ u16 req_compl; /* Number of completions requested */
+ u16 next_poll_seq; /* Next seqno to set completion and wait/poll for one */
+ u64 checksum; /* Host endian partial checksum of stencil WR entries */
+};
+
+
+/* Stencil PQP management */
+struct sif_spqp_pool {
+ struct mutex lock; /* Protects access to this pool */
+ struct sif_st_pqp **spqp; /* Key invalidate stencil PQPs */
+ u32 pool_sz; /* Number of stencil PQPs set up */
+ ulong *bitmap; /* Bitmap for allocation from spqp */
+};
+
+/* PQP-specific global state/configuration (embedded in sif_dev) */
+struct sif_pqp_info {
+ struct sif_pqp **pqp; /* The array of "normal" PQPs */
+ int cnt; /* Number of PQPs set up */
+ atomic_t next; /* Used for round robin assignment of pqp */
+
+ /* Stencil PQPs for key invalidates */
+ struct sif_spqp_pool ki_s;
+};
+
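The doorbell_* fields above describe a batched doorbell scheme; a hypothetical sketch of the intended interplay (st_pqp_post_doorbell() is illustrative and not part of this patch):

	/* After posting the WR with sequence number seq, ring the SQ
	 * doorbell only at every doorbell_interval'th request.
	 */
	static inline void st_pqp_post_doorbell(struct sif_st_pqp *spqp, u16 seq)
	{
		if (seq != spqp->next_doorbell_seq)
			return;		/* batched: no doorbell write yet */
		/* write seq to the SQ doorbell register here */
		spqp->doorbell_seq = seq;
		spqp->next_doorbell_seq = seq + spqp->doorbell_interval;
	}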
struct sif_pqp *sif_create_pqp(struct sif_dev *sdev, int comp_vector);
int sif_destroy_pqp(struct sif_dev *sdev, struct sif_pqp *pqp);
/* Stencil PQP support - pre-populated PQPs for special performance-sensitive use cases */
-#define SPQP_DOORBELL_INTERVAL 8192
-
-struct sif_st_pqp {
- struct sif_pqp pqp; /* The PQP to use - must be first */
- struct sif_sq *sq; /* Short path to sq */
- struct sif_sq_sw *sq_sw;/* Short path to sq_sw */
- int index; /* The index of this st_pqp within it's pool */
- u16 doorbell_interval; /* Interval between each doorbell write */
- u16 doorbell_seq; /* Seq.no to use in next doorbell */
- u16 next_doorbell_seq; /* Next seqno to ring doorbell */
- u16 req_compl; /* Number of completions requested */
- u16 next_poll_seq; /* Next seqno to set completion and wait/poll for one */
- u64 checksum; /* Host endian partial checksum of stencil WR entries */
-};
-
-
-/* Stencil PQP management */
-struct sif_spqp_pool {
- struct mutex lock; /* Protects access to this pool */
- struct sif_st_pqp **spqp; /* Key invalidate stencil PQPs */
- u32 pool_sz; /* Number of stencil PQPs set up */
- ulong *bitmap; /* Bitmap for allocation from spqp */
-};
-
-
struct sif_st_pqp *sif_create_inv_key_st_pqp(struct sif_dev *sdev);
/* get exclusive access to a stencil pqp */