Added these stats:
1. per-connection stat for number of receive buffers in cache
2. global stat for the same across all connections
3. number of bytes in socket receive buffer
Since stats are implemented using per-CPU variables and RDS currently
does unsigned arithmetic to add them up, separate counters (one for
addition and one for subtraction) are used for (2) and (3).
In the future we might change this to signed computation.
Orabug: 17045536
Signed-off-by: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
Signed-off-by: Bang Nguyen <bang.nguyen@oracle.com>
(cherry picked from commit 4631300fcf86d459d5dbb09791ff9198c51feab1)
uint64_t s_ib_srq_refills;
uint64_t s_ib_srq_empty_refills;
uint64_t s_ib_failed_apm;
+ uint64_t s_ib_recv_added_to_cache;
+ uint64_t s_ib_recv_removed_from_cache;
};
extern struct workqueue_struct *rds_ib_wq;
/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
+#define rds_ib_stats_add(member, count) \
+ rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
- atomic_inc(&ic->i_cache_allocs);
+ atomic_add(PAGE_SIZE/1024, &ic->i_cache_allocs);
+ rds_ib_stats_add(s_ib_recv_added_to_cache, PAGE_SIZE);
}
/* Recycle inc after freeing attached frags */
cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
if (cache_item) {
frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
- atomic_dec(&ic->i_cache_allocs);
+ atomic_sub(PAGE_SIZE/1024, &ic->i_cache_allocs);
+ rds_ib_stats_add(s_ib_recv_removed_from_cache, PAGE_SIZE);
} else {
frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
if (!frag)
"ib_srq_refills",
"ib_srq_empty_refills",
"ib_apm_reconnect",
+ "ib_recv_cache_added",
+ "ib_recv_cache_removed",
};
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
uint64_t s_cong_send_error;
uint64_t s_cong_send_blocked;
uint64_t s_qos_threshold_exceeded;
+ uint64_t s_recv_bytes_added_to_socket;
+ uint64_t s_recv_bytes_removed_from_socket;
};
/* af_rds.c */
return;
rs->rs_rcv_bytes += delta;
+ if (delta > 0)
+ rds_stats_add(s_recv_bytes_added_to_socket, delta);
+ else
+ rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
"cong_send_error",
"cong_send_blocked",
"qos_threshold_exceeded",
+ "recv_bytes_added_to_sock",
+ "recv_bytes_freed_fromsock",
};
void rds_stats_info_copy(struct rds_info_iterator *iter,