From 48727f5f448324e9eab5797a304d230ecf672d8c Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Mon, 20 Mar 2023 15:03:41 -0300
Subject: [PATCH] vmstat: switch per-cpu vmstat counters to 32-bits

Some architectures only provide xchg/cmpxchg in 32/64-bit quantities.
Since the next patch is about to use xchg on per-CPU vmstat counters,
switch them to s32.

Link: https://lkml.kernel.org/r/20230320180745.758267946@redhat.com
Signed-off-by: Marcelo Tosatti
Cc: Aaron Tomlin
Cc: Christoph Lameter
Cc: Frederic Weisbecker
Cc: Heiko Carstens
Cc: Huacai Chen
Cc: Michal Hocko
Cc: Peter Xu
Cc: "Russell King (Oracle)"
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---
 include/linux/mmzone.h |  8 ++++----
 mm/vmstat.c            | 32 ++++++++++++++++----------------
 2 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2d3d78d01283..8f5a9e2c722a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -682,8 +682,8 @@ struct per_cpu_pages {
 
 struct per_cpu_zonestat {
 #ifdef CONFIG_SMP
-	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
-	s8 stat_threshold;
+	s32 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+	s32 stat_threshold;
 #endif
 #ifdef CONFIG_NUMA
 	/*
@@ -696,8 +696,8 @@ struct per_cpu_zonestat {
 };
 
 struct per_cpu_nodestat {
-	s8 stat_threshold;
-	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+	s32 stat_threshold;
+	s32 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
 };
 
 #endif /* !__GENERATING_BOUNDS.H */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index bac2970b985a..60f4967cd72c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -351,7 +351,7 @@ static inline void mod_zone_state(struct zone *zone, enum zone_stat_item item,
 	long delta, int overstep_mode)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
 	long o, n, t, z;
 
 	do {
@@ -428,7 +428,7 @@ static inline void mod_node_state(struct pglist_data *pgdat,
 	int delta, int overstep_mode)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
 	long o, n, t, z;
 
 	if (vmstat_item_in_bytes(item)) {
@@ -525,7 +525,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 	long delta)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
 	long x;
 	long t;
 
@@ -556,7 +556,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 	long delta)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
 	long x;
 	long t;
 
@@ -614,8 +614,8 @@ EXPORT_SYMBOL(__mod_node_page_state);
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
+	s32 v, t;
 
 	/* See __mod_node_page_state */
 	preempt_disable_nested();
@@ -623,7 +623,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
@@ -635,8 +635,8 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 v, t;
 
 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
@@ -646,7 +646,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		node_page_state_add(v + overstep, pgdat, item);
 		__this_cpu_write(*p, -overstep);
@@ -670,8 +670,8 @@ EXPORT_SYMBOL(__inc_node_page_state);
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
+	s32 v, t;
 
 	/* See __mod_node_page_state */
 	preempt_disable_nested();
@@ -679,7 +679,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
@@ -691,8 +691,8 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 v, t;
 
 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
@@ -702,7 +702,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		node_page_state_add(v - overstep, pgdat, item);
 		__this_cpu_write(*p, overstep);
-- 
2.50.1
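
A hypothetical sketch, not part of this series: the commit message says
the next patch will use xchg on these per-CPU counters, presumably to
drain a remote CPU's pending deltas along the lines below. The helper
name drain_zone_stat_diff() is invented here for illustration; the
point is that a native xchg() is only guaranteed for 32/64-bit operands
on some architectures, which is why the diff fields grow from s8 to s32.

static long drain_zone_stat_diff(struct zone *zone, int cpu,
				 enum zone_stat_item item)
{
	struct per_cpu_zonestat *pzstats =
		per_cpu_ptr(zone->per_cpu_zonestats, cpu);

	/*
	 * Atomically fetch and clear the pending delta. With an s8
	 * field, this xchg() could not be implemented natively on
	 * architectures that lack byte-sized atomic operations.
	 */
	return xchg(&pzstats->vm_stat_diff[item], 0);
}

The caller would then fold the returned delta into the global counter,
e.g. via zone_page_state_add(delta, zone, item).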