From: Knut Omang Date: Wed, 8 Jun 2016 12:10:27 +0000 (+0200) Subject: sif: Use kernel function printk_ratelimit() instead of home brew X-Git-Tag: v4.1.12-92~129^2~15 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=fea2b11f4c42105a8e0c1f14e94d35f3b3dc8ec5;p=users%2Fjedix%2Flinux-maple.git sif: Use kernel function printk_ratelimit() instead of home brew Removed sif_log_cq, sif_log_perf and the perf_sampling_threshold kernel module parameter which were added for debugging purposes. Also adjust down a few log levels of some messages. Signed-off-by: Knut Omang --- diff --git a/drivers/infiniband/hw/sif/sif_cq.c b/drivers/infiniband/hw/sif/sif_cq.c index 7d0dc4b8fd6f5..5ea7b9b131f6a 100644 --- a/drivers/infiniband/hw/sif/sif_cq.c +++ b/drivers/infiniband/hw/sif/sif_cq.c @@ -256,7 +256,6 @@ struct sif_cq *create_cq(struct sif_pd *pd, int entries, comp_vector : sif_get_eq_channel(sdev, cq); cq->eq_idx = cq->cq_hw.int_channel + 2; - cq->next_logtime = jiffies; init_completion(&cq->cleanup_ok); cq->cq_hw.mmu_cntx = cq->mmu_ctx.mctx; @@ -832,7 +831,7 @@ int sif_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) seqno = cq_sw->next_seq; cqe = get_cq_entry(cq, seqno); - sif_log_cq(cq, SIF_POLL, "cq %d (requested %d entries), next_seq %d %s", + sif_log_rlim(sdev, SIF_POLL, "cq %d (requested %d entries), next_seq %d %s", cq->index, num_entries, cq_sw->next_seq, (wc ? "" : "(peek)")); while (npolled < num_entries) { @@ -880,7 +879,7 @@ handle_failed: sif_log(sdev, SIF_CQ, "done - %d completions - seq_no of next entry: %d", npolled, polled_value); else - sif_log_cq(cq, SIF_POLL, "no completions polled - seq_no of next entry: %d", + sif_log_rlim(sdev, SIF_POLL, "no completions polled - seq_no of next entry: %d", polled_value); return !ret ? 
npolled : ret; } diff --git a/drivers/infiniband/hw/sif/sif_cq.h b/drivers/infiniband/hw/sif/sif_cq.h index 402db2bd5b7f9..9905917bd1b1f 100644 --- a/drivers/infiniband/hw/sif/sif_cq.h +++ b/drivers/infiniband/hw/sif/sif_cq.h @@ -47,8 +47,6 @@ struct sif_cq { atomic_t error_cnt; /* No. of error completions observed on this cq */ atomic_t timeout_cnt; /* No. of completion timeouts observed on this cq */ atomic_t event_cnt; /* No. of completion events observed for this cq (will wrap..) */ - u32 log_cnt; /* Number of suppressed log messages since last print */ - unsigned long next_logtime; /* timeout for when to print next message */ struct sif_rq *xsrq; /* The XRC SRQ using this completion queue (see #3521) */ struct sif_pqp *pqp; /* The PQP using this completion queue (for dfs reporting..) */ }; diff --git a/drivers/infiniband/hw/sif/sif_dev.h b/drivers/infiniband/hw/sif/sif_dev.h index 88d661d300529..e07869ce3c76f 100644 --- a/drivers/infiniband/hw/sif/sif_dev.h +++ b/drivers/infiniband/hw/sif/sif_dev.h @@ -456,41 +456,17 @@ extern ulong sif_trace_mask; } \ } while (0) -#define sif_log_cq(cq, class, format, arg...) \ +#define sif_log_rlim(sdev, class, format, arg...) \ do { \ - if (unlikely((sif_debug_mask) & (class))) { \ - struct sif_dev *sdev = \ - container_of(cq->ibcq.device, struct sif_dev, ib_dev); \ - if (time_before((cq)->next_logtime, jiffies)) { \ - (cq)->next_logtime = jiffies + max(1000ULL, sdev->min_resp_ticks); \ - } else { \ - (cq)->log_cnt++; \ - continue; \ - } \ - dev_info(&sdev->pdev->dev, \ - "pid [%d] %s (suppressed %d): " format "\n", \ - current->pid, __func__, (cq)->log_cnt, \ - ## arg); \ - (cq)->log_cnt = 0; \ + sif_log_trace(class, format, ## arg); \ + if (unlikely((sif_debug_mask) & (class) && printk_ratelimit())) { \ + dev_info(&sdev->pdev->dev, \ + "[%d] " format "\n", \ + current->pid, \ + ## arg); \ } \ } while (0) -#define sif_log_perf(sdev, class, format, arg...) 
\ - do { \ - if (unlikely((sif_debug_mask) & (class))) { \ - if ((sdev)->jiffies_sampling_cnt % sif_perf_sampling_threshold) { \ - (sdev)->jiffies_sampling_cnt++; \ - continue; \ - } \ - dev_info(&(sdev)->pdev->dev, \ - "pid [%d] %s: " format "\n", \ - current->pid, __func__, \ - ## arg); \ - } \ - } while (0) - - - /* some convenience pointer conversion macros: */ #define to_sdev(ibdev) container_of((ibdev), struct sif_dev, ib_dev) diff --git a/drivers/infiniband/hw/sif/sif_fmr.c b/drivers/infiniband/hw/sif/sif_fmr.c index e2fc65229b4d2..79ec9adc69264 100644 --- a/drivers/infiniband/hw/sif/sif_fmr.c +++ b/drivers/infiniband/hw/sif/sif_fmr.c @@ -190,7 +190,7 @@ int sif_unmap_phys_fmr_list(struct list_head *fmr_list) goto out; cnt++; } - sif_log(sdev, SIF_INFO_V, "done with %d invalidates to MMU_VALID", cnt); + sif_log(sdev, SIF_FMR, "done with %d invalidates to MMU_VALID", cnt); cnt = 0; list_for_each_entry(ib_fmr, fmr_list, list) { @@ -200,7 +200,7 @@ int sif_unmap_phys_fmr_list(struct list_head *fmr_list) &(to_sfmr(ib_fmr))->mr->mmu_ctx, mode); cnt++; } - sif_log(sdev, SIF_INFO_V, "done with %d unmap_fmr_ctxs", cnt); + sif_log(sdev, SIF_FMR, "done with %d unmap_fmr_ctxs", cnt); key_to_invalid: cnt = 0; @@ -212,7 +212,7 @@ key_to_invalid: goto out; cnt++; } - sif_log(sdev, SIF_INFO_V, "done invalidating %d fmr keys%s", + sif_log(sdev, SIF_FMR, "done invalidating %d fmr keys%s", cnt, (spqp ? 
" (stencil)" : "")); if (flush_all) { @@ -227,7 +227,7 @@ key_to_invalid: cnt++; } ms = jiffies_to_msecs(jiffies - start_time); - sif_log_perf(sdev, SIF_PERF_V, "done unmapping %d fmrs in %u ms", cnt, ms); + sif_log_rlim(sdev, SIF_PERF_V, "done unmapping %d fmrs in %u ms", cnt, ms); out: if (spqp) sif_release_ki_spqp(spqp); diff --git a/drivers/infiniband/hw/sif/sif_main.c b/drivers/infiniband/hw/sif/sif_main.c index 0ecff0d72c454..bfe427eb9ab2e 100644 --- a/drivers/infiniband/hw/sif/sif_main.c +++ b/drivers/infiniband/hw/sif/sif_main.c @@ -108,11 +108,6 @@ uint sif_cb_max = 100; module_param_named(cb_max, sif_cb_max, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(cb_max, "Upper limit on no. of CBs."); -/* TBD - This is a debug feature to evaluate performance. */ -ushort sif_perf_sampling_threshold = 100; -module_param_named(perf_sampling_threshold, sif_perf_sampling_threshold, ushort, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(perf_sampling_threshold, "The performance measurement based on every N samples"); - uint sif_fmr_cache_flush_threshold = 512; module_param_named(fmr_cache_flush_threshold, sif_fmr_cache_flush_threshold, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fmr_cache_flush_threshold, "PF limit for when to use fast-path full MMU flush for FMR unmap"); diff --git a/drivers/infiniband/hw/sif/sif_sndrcv.c b/drivers/infiniband/hw/sif/sif_sndrcv.c index 71402a948888b..aad7fa0ece3b4 100644 --- a/drivers/infiniband/hw/sif/sif_sndrcv.c +++ b/drivers/infiniband/hw/sif/sif_sndrcv.c @@ -547,8 +547,8 @@ int sif_post_send_single(struct ib_qp *ibqp, struct ib_send_wr *wr, bool *use_db */ qp->traffic_patterns.mask = (qp->traffic_patterns.mask << 1) | HEUR_TX_DIRECTION; - sif_log_perf(sdev, SIF_PERF_V, "qp:traffic_pattern %x", - qp->traffic_patterns.mask); + sif_log_rlim(sdev, SIF_PERF_V, "qp:traffic_pattern %x", + qp->traffic_patterns.mask); /* If the traffic pattern shows that it's not latency sensitive, * use SQ mode by ringing the doorbell. 
* In a latency sensitive traffic pattern, a SEND should