comp_vector : sif_get_eq_channel(sdev, cq);
cq->eq_idx = cq->cq_hw.int_channel + 2;
- cq->next_logtime = jiffies;
init_completion(&cq->cleanup_ok);
cq->cq_hw.mmu_cntx = cq->mmu_ctx.mctx;
seqno = cq_sw->next_seq;
cqe = get_cq_entry(cq, seqno);
- sif_log_cq(cq, SIF_POLL, "cq %d (requested %d entries), next_seq %d %s",
+ sif_log_rlim(sdev, SIF_POLL, "cq %d (requested %d entries), next_seq %d %s",
cq->index, num_entries, cq_sw->next_seq, (wc ? "" : "(peek)"));
while (npolled < num_entries) {
sif_log(sdev, SIF_CQ, "done - %d completions - seq_no of next entry: %d",
npolled, polled_value);
else
- sif_log_cq(cq, SIF_POLL, "no completions polled - seq_no of next entry: %d",
+ sif_log_rlim(sdev, SIF_POLL, "no completions polled - seq_no of next entry: %d",
polled_value);
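	/* ib_poll_cq convention: return the number of polled entries on
	 * success, or a negative errno on failure:
	 */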
return !ret ? npolled : ret;
}
atomic_t error_cnt; /* No. of error completions observed on this cq */
atomic_t timeout_cnt; /* No. of completion timeouts observed on this cq */
atomic_t event_cnt; /* No. of completion events observed for this cq (will wrap..) */
- u32 log_cnt; /* Number of suppressed log messages since last print */
- unsigned long next_logtime; /* timeout for when to print next message */
struct sif_rq *xsrq; /* The XRC SRQ using this completion queue (see #3521) */
struct sif_pqp *pqp; /* The PQP using this completion queue (for dfs reporting..) */
};
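/*
 * Illustrative sketch (editorial, not from this patch): the atomic counters
 * above are bumped from the completion handling paths, along the lines of
 *
 *	if (cqe->status != PSIF_WC_STATUS_SUCCESS)
 *		atomic_inc(&cq->error_cnt);
 *
 * The status name PSIF_WC_STATUS_SUCCESS is assumed here for illustration;
 * the driver's actual success check may differ.
 */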
} \
} while (0)
-#define sif_log_cq(cq, class, format, arg...) \
+#define sif_log_rlim(sdev, class, format, arg...) \
do { \
- if (unlikely((sif_debug_mask) & (class))) { \
- struct sif_dev *sdev = \
- container_of(cq->ibcq.device, struct sif_dev, ib_dev); \
- if (time_before((cq)->next_logtime, jiffies)) { \
- (cq)->next_logtime = jiffies + max(1000ULL, sdev->min_resp_ticks); \
- } else { \
- (cq)->log_cnt++; \
- continue; \
- } \
- dev_info(&sdev->pdev->dev, \
- "pid [%d] %s (suppressed %d): " format "\n", \
- current->pid, __func__, (cq)->log_cnt, \
- ## arg); \
- (cq)->log_cnt = 0; \
+ sif_log_trace(class, format, ## arg); \
+ if (unlikely(((sif_debug_mask) & (class)) && printk_ratelimit())) { \
+ dev_info(&sdev->pdev->dev, \
+ "pid [%d] %s: " format "\n", \
+ current->pid, __func__, \
+ ## arg); \
} \
} while (0)
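/*
 * Usage sketch (editorial, not part of the patch): callers now pass the
 * sif_dev pointer explicitly instead of deriving it from the CQ, e.g.
 *
 *	sif_log_rlim(sdev, SIF_POLL, "cq %d: no completions, next_seq %d",
 *		     cq->index, cq_sw->next_seq);
 *
 * Because of the short-circuit &&, printk_ratelimit() is only consulted when
 * the debug class is enabled, so disabled classes do not consume ratelimit
 * budget. Suppression is reported by the ratelimit core ("callbacks
 * suppressed") instead of the removed per-CQ log_cnt counter.
 */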
-#define sif_log_perf(sdev, class, format, arg...) \
- do { \
- if (unlikely((sif_debug_mask) & (class))) { \
- if ((sdev)->jiffies_sampling_cnt % sif_perf_sampling_threshold) { \
- (sdev)->jiffies_sampling_cnt++; \
- continue; \
- } \
- dev_info(&(sdev)->pdev->dev, \
- "pid [%d] %s: " format "\n", \
- current->pid, __func__, \
- ## arg); \
- } \
- } while (0)
-
-
-
/* some convenience pointer conversion macros: */
#define to_sdev(ibdev) container_of((ibdev), struct sif_dev, ib_dev)
goto out;
cnt++;
}
- sif_log(sdev, SIF_INFO_V, "done with %d invalidates to MMU_VALID", cnt);
+ sif_log(sdev, SIF_FMR, "done with %d invalidates to MMU_VALID", cnt);
cnt = 0;
list_for_each_entry(ib_fmr, fmr_list, list) {
&(to_sfmr(ib_fmr))->mr->mmu_ctx, mode);
cnt++;
}
- sif_log(sdev, SIF_INFO_V, "done with %d unmap_fmr_ctxs", cnt);
+ sif_log(sdev, SIF_FMR, "done with %d unmap_fmr_ctxs", cnt);
key_to_invalid:
cnt = 0;
goto out;
cnt++;
}
- sif_log(sdev, SIF_INFO_V, "done invalidating %d fmr keys%s",
+ sif_log(sdev, SIF_FMR, "done invalidating %d fmr keys%s",
cnt, (spqp ? " (stencil)" : ""));
if (flush_all) {
cnt++;
}
ms = jiffies_to_msecs(jiffies - start_time);
- sif_log_perf(sdev, SIF_PERF_V, "done unmapping %d fmrs in %u ms", cnt, ms);
+ sif_log_rlim(sdev, SIF_PERF_V, "done unmapping %d fmrs in %u ms", cnt, ms);
out:
if (spqp)
sif_release_ki_spqp(spqp);
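/*
 * Editorial note: the removed sif_log_perf() printed only every
 * sif_perf_sampling_threshold-th call; sif_log_rlim() suppresses on a time
 * basis instead, via the kernel's global printk ratelimit (by default a
 * burst of 10 messages per 5 s, tunable through
 * /proc/sys/kernel/printk_ratelimit and printk_ratelimit_burst).
 */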
module_param_named(cb_max, sif_cb_max, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(cb_max, "Upper limit on no. of CBs.");
-/* TBD - This is a debug feature to evaluate performance. */
-ushort sif_perf_sampling_threshold = 100;
-module_param_named(perf_sampling_threshold, sif_perf_sampling_threshold, ushort, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(perf_sampling_threshold, "The performance measurement based on every N samples");
-
uint sif_fmr_cache_flush_threshold = 512;
module_param_named(fmr_cache_flush_threshold, sif_fmr_cache_flush_threshold, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fmr_cache_flush_threshold, "PF limit for when to use fast-path full MMU flush for FMR unmap");
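/*
 * Example (assuming the module is named "sif"; module_param_named with
 * S_IRUGO | S_IWUSR exposes the parameter read/write through sysfs):
 *
 *	# cat /sys/module/sif/parameters/fmr_cache_flush_threshold
 *	512
 *	# echo 1024 > /sys/module/sif/parameters/fmr_cache_flush_threshold
 */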
*/
qp->traffic_patterns.mask = (qp->traffic_patterns.mask << 1) |
HEUR_TX_DIRECTION;
- sif_log_perf(sdev, SIF_PERF_V, "qp:traffic_pattern %x",
- qp->traffic_patterns.mask);
+ sif_log_rlim(sdev, SIF_PERF_V, "qp:traffic_pattern %x",
+ qp->traffic_patterns.mask);
/* If the traffic pattern shows that it's not latency sensitive,
* use SQ mode by ringing the doorbell.
* In a latency sensitive traffic pattern, a SEND should