slub: make ->cpu_partial unsigned
author Alexey Dobriyan <adobriyan@gmail.com>
Mon, 28 Jan 2019 19:31:17 +0000 (11:31 -0800)
committer Brian Maly <brian.maly@oracle.com>
Tue, 12 Feb 2019 19:17:09 +0000 (14:17 -0500)
/*
 * cpu_partial determined the maximum number of objects
 * kept in the per cpu partial lists of a processor.
 */

Can't be negative.

Orabug: 28620592

We can't reproduce the issue; this patch is expected to help in theory.
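
To see why the signed field was a problem, consider a minimal userspace sketch (not kernel code; the input value is hypothetical): the old sysfs handler parsed the value into an unsigned long and stored it in the signed int field, so a large but parseable value could land in the cache as a negative object count.

#include <stdio.h>

int main(void)
{
	unsigned long objects = 4294967295UL; /* UINT_MAX; kstrtoul accepted this */
	int cpu_partial = objects;            /* old signed field: -1 on common ABIs */

	printf("cpu_partial = %d\n", cpu_partial); /* prints -1 */
	return 0;
}

Making the field unsigned, and parsing with kstrtouint (see the mm/slub.c hunk below), rejects out-of-range input up front instead of wrapping it.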

Link: http://lkml.kernel.org/r/20180305200730.15812-15-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
Reviewed-by: John Sobecki <john.sobecki@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
include/linux/slub_def.h
mm/slub.c

index 33885118523c7ce695e2c901e42d76250a84a8da..311cb45124b757e36787875fa2d3fb0277c6f85c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -67,7 +67,8 @@ struct kmem_cache {
        int size;               /* The size of an object including meta data */
        int object_size;        /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
-       int cpu_partial;        /* Number of per cpu partial objects to keep around */
+       /* Number of per cpu partial objects to keep around */
+       UEK_KABI_REPLACE(int cpu_partial, unsigned int cpu_partial)
        struct kmem_cache_order_objects oo;
 
        /* Allocation and freeing of slabs */
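
UEK_KABI_REPLACE swaps the member's type without disturbing the structure layout that the kernel ABI checker has already blessed. As a rough sketch of how a kABI-replace macro of this shape is commonly built (an illustration assuming the usual anonymous-union trick; this is not the actual UEK definition):

/* Illustrative only: overlay the new member on the old one so member
 * offsets and sizeof(struct kmem_cache) stay unchanged. Real
 * implementations generate a unique name for the hidden struct and
 * add compile-time size/alignment checks. */
#define KABI_REPLACE_SKETCH(_orig, _new) \
	union {                          \
		_new;                    \
		struct {                 \
			_orig;           \
		} __kabi_hidden;         \
	}

Here the swap is from int to unsigned int, which have identical size and alignment, so the overlay is trivially safe.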
index 3d7932cc708468c40043fd34d2bba3486b92f30e..0419962fff24e54a69b4640be32f14070673f8fc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1619,7 +1619,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
        struct page *page, *page2;
        void *object = NULL;
-       int available = 0;
+       unsigned int available = 0;
        int objects;
 
        /*
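
Widening `available` to unsigned int keeps it the same signedness as the cpu_partial value it is compared against. A minimal userspace illustration of the conversion trap that mixed-signedness comparisons invite (the threshold value is made up):

#include <stdio.h>

int main(void)
{
	int available = -1;         /* signed counter, as before the patch */
	unsigned int threshold = 4; /* stands in for a cpu_partial-derived limit */

	/* Usual arithmetic conversions turn -1 into UINT_MAX here. */
	if (available > threshold)
		printf("surprise: -1 > 4u evaluates as true\n");
	return 0;
}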
@@ -4460,10 +4460,10 @@ static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
                                 size_t length)
 {
-       unsigned long objects;
+       unsigned int objects;
        int err;
 
-       err = kstrtoul(buf, 10, &objects);
+       err = kstrtouint(buf, 10, &objects);
        if (err)
                return err;
        if (objects && !kmem_cache_has_cpu_partial(s))
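
With kstrtouint, input above UINT_MAX now fails with -ERANGE at parse time instead of being accepted into an unsigned long and truncated on assignment. A hypothetical userspace probe of the knob (the cache name kmalloc-64 is an assumption, any cache under /sys/kernel/slab/ works; writing requires root):

#include <errno.h>
#include <stdio.h>

static int write_knob(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	int rc = 0;

	if (!f)
		return -errno;
	if (fputs(val, f) == EOF)
		rc = -errno;
	if (fclose(f) == EOF && rc == 0) /* sysfs reports store errors at close */
		rc = -errno;
	return rc;
}

int main(void)
{
	const char *knob = "/sys/kernel/slab/kmalloc-64/cpu_partial";

	printf("30         -> %d\n", write_knob(knob, "30"));         /* accepted */
	printf("4294967296 -> %d\n", write_knob(knob, "4294967296")); /* now -ERANGE */
	return 0;
}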