vmstat: switch per-cpu vmstat counters to 32-bits
author Marcelo Tosatti <mtosatti@redhat.com>
Mon, 20 Mar 2023 18:03:41 +0000 (15:03 -0300)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 28 Mar 2023 23:25:09 +0000 (16:25 -0700)
Some architectures only provide xchg/cmpxchg in 32/64-bit quantities.

The next patch in this series uses xchg() on the per-CPU vmstat counters,
so switch them from s8 to s32.

Link: https://lkml.kernel.org/r/20230320180745.758267946@redhat.com
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Aaron Tomlin <atomlin@atomlin.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: "Russell King (Oracle)" <linux@armlinux.org.uk>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmzone.h
mm/vmstat.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2d3d78d0128335368d130336f6d2efd4cde38500..8f5a9e2c722a0e36089d2e06d7c5b422899a5135 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -682,8 +682,8 @@ struct per_cpu_pages {
 
 struct per_cpu_zonestat {
 #ifdef CONFIG_SMP
-       s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
-       s8 stat_threshold;
+       s32 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+       s32 stat_threshold;
 #endif
 #ifdef CONFIG_NUMA
        /*
@@ -696,8 +696,8 @@ struct per_cpu_zonestat {
 };
 
 struct per_cpu_nodestat {
-       s8 stat_threshold;
-       s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+       s32 stat_threshold;
+       s32 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
 };
 
 #endif /* !__GENERATING_BOUNDS.H */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index bac2970b985ae58f57cabe5d1e589afc5bd5da2a..60f4967cd72c3e51a8e40c8d31f3c5b96b2f1893 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -351,7 +351,7 @@ static inline void mod_zone_state(struct zone *zone, enum zone_stat_item item,
                                  long delta, int overstep_mode)
 {
        struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-       s8 __percpu *p = pcp->vm_stat_diff + item;
+       s32 __percpu *p = pcp->vm_stat_diff + item;
        long o, n, t, z;
 
        do {
@@ -428,7 +428,7 @@ static inline void mod_node_state(struct pglist_data *pgdat,
                                  int delta, int overstep_mode)
 {
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-       s8 __percpu *p = pcp->vm_node_stat_diff + item;
+       s32 __percpu *p = pcp->vm_node_stat_diff + item;
        long o, n, t, z;
 
        if (vmstat_item_in_bytes(item)) {
@@ -525,7 +525,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                           long delta)
 {
        struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-       s8 __percpu *p = pcp->vm_stat_diff + item;
+       s32 __percpu *p = pcp->vm_stat_diff + item;
        long x;
        long t;
 
@@ -556,7 +556,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
                                long delta)
 {
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-       s8 __percpu *p = pcp->vm_node_stat_diff + item;
+       s32 __percpu *p = pcp->vm_node_stat_diff + item;
        long x;
        long t;
 
@@ -614,8 +614,8 @@ EXPORT_SYMBOL(__mod_node_page_state);
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
        struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-       s8 __percpu *p = pcp->vm_stat_diff + item;
-       s8 v, t;
+       s32 __percpu *p = pcp->vm_stat_diff + item;
+       s32 v, t;
 
        /* See __mod_node_page_state */
        preempt_disable_nested();
@@ -623,7 +623,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
        v = __this_cpu_inc_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v > t)) {
-               s8 overstep = t >> 1;
+               s32 overstep = t >> 1;
 
                zone_page_state_add(v + overstep, zone, item);
                __this_cpu_write(*p, -overstep);
@@ -635,8 +635,8 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 {
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-       s8 __percpu *p = pcp->vm_node_stat_diff + item;
-       s8 v, t;
+       s32 __percpu *p = pcp->vm_node_stat_diff + item;
+       s32 v, t;
 
        VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
@@ -646,7 +646,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
        v = __this_cpu_inc_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v > t)) {
-               s8 overstep = t >> 1;
+               s32 overstep = t >> 1;
 
                node_page_state_add(v + overstep, pgdat, item);
                __this_cpu_write(*p, -overstep);
@@ -670,8 +670,8 @@ EXPORT_SYMBOL(__inc_node_page_state);
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
        struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-       s8 __percpu *p = pcp->vm_stat_diff + item;
-       s8 v, t;
+       s32 __percpu *p = pcp->vm_stat_diff + item;
+       s32 v, t;
 
        /* See __mod_node_page_state */
        preempt_disable_nested();
@@ -679,7 +679,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
        v = __this_cpu_dec_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v < - t)) {
-               s8 overstep = t >> 1;
+               s32 overstep = t >> 1;
 
                zone_page_state_add(v - overstep, zone, item);
                __this_cpu_write(*p, overstep);
@@ -691,8 +691,8 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 {
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-       s8 __percpu *p = pcp->vm_node_stat_diff + item;
-       s8 v, t;
+       s32 __percpu *p = pcp->vm_node_stat_diff + item;
+       s32 v, t;
 
        VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
@@ -702,7 +702,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
        v = __this_cpu_dec_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v < - t)) {
-               s8 overstep = t >> 1;
+               s32 overstep = t >> 1;
 
                node_page_state_add(v - overstep, pgdat, item);
                __this_cpu_write(*p, overstep);