  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  *             redzone word.
  * cachep->obj_offset: The real object.
- * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
+ * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->size - 1* BYTES_PER_WORD: last caller address
  *                                     [BYTES_PER_WORD long]
  */
 static int obj_offset(struct kmem_cache *cachep)
 {
        return cachep->obj_offset;
 }
 
 static int obj_size(struct kmem_cache *cachep)
 {
-       return cachep->obj_size;
+       return cachep->object_size;
 }
 
 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
        return (unsigned long long *)(objp + obj_offset(cachep) -
                                      sizeof(unsigned long long));
 }

 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
        if (cachep->flags & SLAB_STORE_USER)
-               return (unsigned long long *)(objp + cachep->buffer_size -
+               return (unsigned long long *)(objp + cachep->size -
                                              sizeof(unsigned long long) -
                                              REDZONE_ALIGN);
-       return (unsigned long long *) (objp + cachep->buffer_size -
+       return (unsigned long long *) (objp + cachep->size -
                                       sizeof(unsigned long long));
 }
 
 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-       return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
+       return (void **)(objp + cachep->size - BYTES_PER_WORD);
 }
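
(Aside, not part of the patch: the layout comment and the accessors above can be pictured with a small standalone sketch in plain userspace C. The sizes are hypothetical; only the offset arithmetic mirrors dbg_redzone1()/dbg_redzone2()/dbg_userword().)

    /* layout_sketch.c -- debug layout of one slab object, hypothetical sizes */
    #include <stdio.h>

    #define BYTES_PER_WORD  sizeof(void *)

    int main(void)
    {
            size_t obj_offset  = BYTES_PER_WORD; /* object starts after redzone 1 */
            size_t object_size = 100;            /* what the caller asked for */
            size_t size        = 128;            /* rounded-up buffer incl. debug words */

            printf("redzone1: [%zu..%zu)\n", obj_offset - BYTES_PER_WORD, obj_offset);
            printf("object  : [%zu..%zu)\n", obj_offset, obj_offset + object_size);
            printf("redzone2: offset %zu\n", size - 2 * BYTES_PER_WORD);
            printf("caller  : offset %zu\n", size - 1 * BYTES_PER_WORD);
            return 0;
    }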
 
 #else
 
 #define obj_offset(x)                  0
-#define obj_size(cachep)               (cachep->buffer_size)
+#define obj_size(cachep)               (cachep->size)
 #define dbg_redzone1(cachep, objp)     ({BUG(); (unsigned long long *)NULL;})
 #define dbg_redzone2(cachep, objp)     ({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)     ({BUG(); (void **)NULL;})

 #endif

 #ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
-       return cachep->buffer_size;
+       return cachep->size;
 }
 EXPORT_SYMBOL(slab_buffer_size);
 #endif
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
                                 unsigned int idx)
 {
-       return slab->s_mem + cache->buffer_size * idx;
+       return slab->s_mem + cache->size * idx;
 }
 
 /*
- * We want to avoid an expensive divide : (offset / cache->buffer_size)
- *   Using the fact that buffer_size is a constant for a particular cache,
- *   we can replace (offset / cache->buffer_size) by
+ * We want to avoid an expensive divide : (offset / cache->size)
+ *   Using the fact that size is a constant for a particular cache,
+ *   we can replace (offset / cache->size) by
  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
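
(Aside, not part of the patch: a userspace sketch of the reciprocal trick that obj_to_index() relies on. reciprocal_value()/reciprocal_divide() paraphrase the pre-3.14 <linux/reciprocal_div.h> helpers; the sizes below are hypothetical.)

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reciprocal_value(uint32_t k)
    {
            return (uint32_t)((((uint64_t)1 << 32) + k - 1) / k);
    }

    static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);  /* ~ a / k, no divide */
    }

    int main(void)
    {
            uint32_t size = 192;                     /* cache->size */
            uint32_t r    = reciprocal_value(size);  /* cache->reciprocal_buffer_size */
            uint32_t off  = 5 * size + 17;           /* offset of an object in its slab */

            printf("index = %u\n", reciprocal_divide(off, r));  /* prints 5 */
            return 0;
    }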
        .batchcount = 1,
        .limit = BOOT_CPUCACHE_ENTRIES,
        .shared = 1,
-       .buffer_size = sizeof(struct kmem_cache),
+       .size = sizeof(struct kmem_cache),
        .name = "kmem_cache",
 };
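
(For orientation, the fields this patch touches, as a partial sketch only; the real struct kmem_cache has many more members and differs per allocator.)

    struct kmem_cache {
            unsigned int size;              /* aligned per-object stride (was buffer_size) */
            unsigned int object_size;       /* original object size (was obj_size/objsize) */
            unsigned int align;
            unsigned long flags;
            const char *name;
            struct list_head list;          /* cache-chain linkage (was next) */
            u32 reciprocal_buffer_size;     /* reciprocal_value(size) */
            /* ... allocator-specific fields ... */
    };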
 
        struct kmem_list3 *l3;
        const int memsize = sizeof(struct kmem_list3);
 
-       list_for_each_entry(cachep, &cache_chain, next) {
+       list_for_each_entry(cachep, &cache_chain, list) {
                /*
                 * Set up the size64 kmemlist for cpu before we can
                 * begin anything. Make sure some other cpu on this
        int node = cpu_to_mem(cpu);
        const struct cpumask *mask = cpumask_of_node(node);
 
-       list_for_each_entry(cachep, &cache_chain, next) {
+       list_for_each_entry(cachep, &cache_chain, list) {
                struct array_cache *nc;
                struct array_cache *shared;
                struct array_cache **alien;
         * the respective cache's slabs,  now we can go ahead and
         * shrink each nodelist to its limit.
         */
-       list_for_each_entry(cachep, &cache_chain, next) {
+       list_for_each_entry(cachep, &cache_chain, list) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;
         * Now we can go ahead with allocating the shared arrays and
         * array caches
         */
-       list_for_each_entry(cachep, &cache_chain, next) {
+       list_for_each_entry(cachep, &cache_chain, list) {
                struct array_cache *nc;
                struct array_cache *shared = NULL;
                struct array_cache **alien = NULL;
        struct kmem_cache *cachep;
        int ret = 0;
 
-       list_for_each_entry(cachep, &cache_chain, next) {
+       list_for_each_entry(cachep, &cache_chain, list) {
                struct kmem_list3 *l3;
 
                l3 = cachep->nodelists[node];
 
        /* 1) create the cache_cache */
        INIT_LIST_HEAD(&cache_chain);
-       list_add(&cache_cache.next, &cache_chain);
+       list_add(&cache_cache.list, &cache_chain);
        cache_cache.colour_off = cache_line_size();
        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
        cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
        /*
         * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
         */
-       cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+       cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
                                  nr_node_ids * sizeof(struct kmem_list3 *);
-#if DEBUG
-       cache_cache.obj_size = cache_cache.buffer_size;
-#endif
-       cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
+       cache_cache.object_size = cache_cache.size;
+       cache_cache.size = ALIGN(cache_cache.size,
                                        cache_line_size());
        cache_cache.reciprocal_buffer_size =
-               reciprocal_value(cache_cache.buffer_size);
+               reciprocal_value(cache_cache.size);
 
        for (order = 0; order < MAX_ORDER; order++) {
-               cache_estimate(order, cache_cache.buffer_size,
+               cache_estimate(order, cache_cache.size,
                        cache_line_size(), 0, &left_over, &cache_cache.num);
                if (cache_cache.num)
                        break;
 
        /* 6) resize the head arrays to their final sizes */
        mutex_lock(&cache_chain_mutex);
-       list_for_each_entry(cachep, &cache_chain, next)
+       list_for_each_entry(cachep, &cache_chain, list)
                if (enable_cpucache(cachep, GFP_NOWAIT))
                        BUG();
        mutex_unlock(&cache_chain_mutex);
                "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
                nodeid, gfpflags);
        printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
-               cachep->name, cachep->buffer_size, cachep->gfporder);
+               cachep->name, cachep->size, cachep->gfporder);
 
        for_each_online_node(node) {
                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
 
                if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-                       if (cachep->buffer_size % PAGE_SIZE == 0 &&
+                       if (cachep->size % PAGE_SIZE == 0 &&
                                        OFF_SLAB(cachep))
                                kernel_map_pages(virt_to_page(objp),
-                                       cachep->buffer_size / PAGE_SIZE, 1);
+                                       cachep->size / PAGE_SIZE, 1);
                        else
                                check_poison_obj(cachep, objp);
 #else
                mutex_lock(&cache_chain_mutex);
        }
 
-       list_for_each_entry(pc, &cache_chain, next) {
+       list_for_each_entry(pc, &cache_chain, list) {
                char tmp;
                int res;
 
                if (res) {
                        printk(KERN_ERR
                               "SLAB: cache with size %d has lost its name\n",
-                              pc->buffer_size);
+                              pc->size);
                        continue;
                }
 
                goto oops;
 
        cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+       cachep->object_size = size;
+       cachep->align = align;
 #if DEBUG
-       cachep->obj_size = size;
 
        /*
         * Both debugging options require word-alignment which is calculated
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-           && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+           && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
                size = PAGE_SIZE;
        }
        cachep->gfpflags = 0;
        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
                cachep->gfpflags |= GFP_DMA;
-       cachep->buffer_size = size;
+       cachep->size = size;
        cachep->reciprocal_buffer_size = reciprocal_value(size);
 
        if (flags & CFLGS_OFF_SLAB) {
        }
 
        /* cache setup completed, link it into the list */
-       list_add(&cachep->next, &cache_chain);
+       list_add(&cachep->list, &cache_chain);
 oops:
        if (!cachep && (flags & SLAB_PANIC))
                panic("kmem_cache_create(): failed to create slab `%s'\n",
        /*
         * the chain is never empty, cache_cache is never destroyed
         */
-       list_del(&cachep->next);
+       list_del(&cachep->list);
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
-               list_add(&cachep->next, &cache_chain);
+               list_add(&cachep->list, &cache_chain);
                mutex_unlock(&cache_chain_mutex);
                put_online_cpus();
                return;
                                slab_error(cachep, "constructor overwrote the"
                                           " start of an object");
                }
-               if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
+               if ((cachep->size % PAGE_SIZE) == 0 &&
                            OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
                        kernel_map_pages(virt_to_page(objp),
-                                        cachep->buffer_size / PAGE_SIZE, 0);
+                                        cachep->size / PAGE_SIZE, 0);
 #else
                if (cachep->ctor)
                        cachep->ctor(objp);
 #endif
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+               if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
                        store_stackinfo(cachep, objp, (unsigned long)caller);
                        kernel_map_pages(virt_to_page(objp),
-                                        cachep->buffer_size / PAGE_SIZE, 0);
+                                        cachep->size / PAGE_SIZE, 0);
                } else {
                        poison_obj(cachep, objp, POISON_FREE);
                }
                return objp;
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+               if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
                        kernel_map_pages(virt_to_page(objp),
-                                        cachep->buffer_size / PAGE_SIZE, 1);
+                                        cachep->size / PAGE_SIZE, 1);
                else
                        check_poison_obj(cachep, objp);
 #else
                unsigned objnr;
 
                slabp = virt_to_head_page(objp)->slab_page;
-               objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+               objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
                slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
        }
 #endif
        void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
        trace_kmem_cache_alloc(_RET_IP_, ret,
-                              obj_size(cachep), cachep->buffer_size, flags);
+                              obj_size(cachep), cachep->size, flags);
 
        return ret;
 }
                                       __builtin_return_address(0));
 
        trace_kmem_cache_alloc_node(_RET_IP_, ret,
-                                   obj_size(cachep), cachep->buffer_size,
+                                   obj_size(cachep), cachep->size,
                                    flags, nodeid);
 
        return ret;
        ret = __cache_alloc(cachep, flags, caller);
 
        trace_kmalloc((unsigned long) caller, ret,
-                     size, cachep->buffer_size, flags);
+                     size, cachep->size, flags);
 
        return ret;
 }
        return 0;
 
 fail:
-       if (!cachep->next.next) {
+       if (!cachep->list.next) {
                /* Cache is not active yet. Roll back what we did */
                node--;
                while (node >= 0) {
         * The numbers are guessed, we should auto-tune as described by
         * Bonwick.
         */
-       if (cachep->buffer_size > 131072)
+       if (cachep->size > 131072)
                limit = 1;
-       else if (cachep->buffer_size > PAGE_SIZE)
+       else if (cachep->size > PAGE_SIZE)
                limit = 8;
-       else if (cachep->buffer_size > 1024)
+       else if (cachep->size > 1024)
                limit = 24;
-       else if (cachep->buffer_size > 256)
+       else if (cachep->size > 256)
                limit = 54;
        else
                limit = 120;
         * to a larger limit. Thus disabled by default.
         */
        shared = 0;
-       if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
+       if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
                shared = 8;
 
 #if DEBUG
                /* Give up. Setup the next iteration. */
                goto out;
 
-       list_for_each_entry(searchp, &cache_chain, next) {
+       list_for_each_entry(searchp, &cache_chain, list) {
                check_irq_on();
 
                /*
 
 static int s_show(struct seq_file *m, void *p)
 {
-       struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+       struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
        struct slab *slabp;
        unsigned long active_objs;
        unsigned long num_objs;
                printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-                  name, active_objs, num_objs, cachep->buffer_size,
+                  name, active_objs, num_objs, cachep->size,
                   cachep->num, (1 << cachep->gfporder));
        seq_printf(m, " : tunables %4u %4u %4u",
                   cachep->limit, cachep->batchcount, cachep->shared);
        /* Find the cache in the chain of caches. */
        mutex_lock(&cache_chain_mutex);
        res = -EINVAL;
-       list_for_each_entry(cachep, &cache_chain, next) {
+       list_for_each_entry(cachep, &cache_chain, list) {
                if (!strcmp(cachep->name, kbuf)) {
                        if (limit < 1 || batchcount < 1 ||
                                        batchcount > limit || shared < 0) {
        int i;
        if (n[0] == n[1])
                return;
-       for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+       for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
                if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
                        continue;
                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
 
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-               return s->objsize;
+               return s->object_size;
 
 #endif
        /*
        if (p > addr + 16)
                print_section("Bytes b4 ", p - 16, 16);
 
-       print_section("Object ", p, min_t(unsigned long, s->objsize,
+       print_section("Object ", p, min_t(unsigned long, s->object_size,
                                PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p + s->objsize,
-                       s->inuse - s->objsize);
+               print_section("Redzone ", p + s->object_size,
+                       s->inuse - s->object_size);
 
        if (s->offset)
                off = s->offset + sizeof(void *);
        u8 *p = object;
 
        if (s->flags & __OBJECT_POISON) {
-               memset(p, POISON_FREE, s->objsize - 1);
-               p[s->objsize - 1] = POISON_END;
+               memset(p, POISON_FREE, s->object_size - 1);
+               p[s->object_size - 1] = POISON_END;
        }
 
        if (s->flags & SLAB_RED_ZONE)
-               memset(p + s->objsize, val, s->inuse - s->objsize);
+               memset(p + s->object_size, val, s->inuse - s->object_size);
 }
 
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  *     Poisoning uses 0x6b (POISON_FREE) and the last byte is
  *     0xa5 (POISON_END)
  *
- * object + s->objsize
+ * object + s->object_size
  *     Padding to reach word boundary. This is also used for Redzoning.
  *     Padding is extended by another word if Redzoning is enabled and
- *     objsize == inuse.
+ *     object_size == inuse.
  *
  *     We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  *     0xcc (RED_ACTIVE) for objects in use.
  * object + s->size
  *     Nothing is used beyond s->size.
  *
- * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * If slabcaches are merged then the object_size and inuse boundaries are mostly
  * ignored. And therefore no slab options that rely on these boundaries
  * may be used with merged slabcaches.
  */
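
(Aside, not part of the patch: a standalone restatement of the freed-object pattern that init_object() above writes and check_object() below verifies. Sizes are hypothetical; the constants are the ones named in the comment.)

    #include <stddef.h>
    #include <string.h>

    #define POISON_FREE        0x6b
    #define POISON_END         0xa5
    #define SLUB_RED_INACTIVE  0xbb   /* 0xcc (SLUB_RED_ACTIVE) once allocated */

    /* Fill object_size bytes with poison, terminate with POISON_END, then
     * mark the padding up to inuse as the inactive red zone. */
    static void poison_sketch(unsigned char *p, size_t object_size, size_t inuse)
    {
            memset(p, POISON_FREE, object_size - 1);
            p[object_size - 1] = POISON_END;
            memset(p + object_size, SLUB_RED_INACTIVE, inuse - object_size);
    }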
                                        void *object, u8 val)
 {
        u8 *p = object;
-       u8 *endobject = object + s->objsize;
+       u8 *endobject = object + s->object_size;
 
        if (s->flags & SLAB_RED_ZONE) {
                if (!check_bytes_and_report(s, page, object, "Redzone",
-                       endobject, val, s->inuse - s->objsize))
+                       endobject, val, s->inuse - s->object_size))
                        return 0;
        } else {
-               if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+               if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
-                               endobject, POISON_INUSE, s->inuse - s->objsize);
+                               endobject, POISON_INUSE, s->inuse - s->object_size);
                }
        }
 
        if (s->flags & SLAB_POISON) {
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
-                                       POISON_FREE, s->objsize - 1) ||
+                                       POISON_FREE, s->object_size - 1) ||
                         !check_bytes_and_report(s, page, p, "Poison",
-                               p + s->objsize - 1, POISON_END, 1)))
+                               p + s->object_size - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object, s->objsize);
+                       print_section("Object ", (void *)object, s->object_size);
 
                dump_stack();
        }
        lockdep_trace_alloc(flags);
        might_sleep_if(flags & __GFP_WAIT);
 
-       return should_failslab(s->objsize, flags, s->flags);
+       return should_failslab(s->object_size, flags, s->flags);
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-       kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+       kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
                unsigned long flags;
 
                local_irq_save(flags);
-               kmemcheck_slab_free(s, x, s->objsize);
-               debug_check_no_locks_freed(x, s->objsize);
+               kmemcheck_slab_free(s, x, s->object_size);
+               debug_check_no_locks_freed(x, s->object_size);
                local_irq_restore(flags);
        }
 #endif
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
-               debug_check_no_obj_freed(x, s->objsize);
+               debug_check_no_obj_freed(x, s->object_size);
 }
 
 /*
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long objsize,
+static unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
 {
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                        struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long objsize,
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
 {
                "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
                nid, gfpflags);
        printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
-               "default order: %d, min order: %d\n", s->name, s->objsize,
+               "default order: %d, min order: %d\n", s->name, s->object_size,
                s->size, oo_order(s->oo), oo_order(s->min));
 
-       if (oo_order(s->min) > get_order(s->objsize))
+       if (oo_order(s->min) > get_order(s->object_size))
                printk(KERN_WARNING "  %s debugging increased min order, use "
                       "slub_debug=O to disable.\n", s->name);
 
        }
 
        if (unlikely(gfpflags & __GFP_ZERO) && object)
-               memset(object, 0, s->objsize);
+               memset(object, 0, s->object_size);
 
        slab_post_alloc_hook(s, gfpflags, object);
 
 {
        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
-       trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
        return ret;
 }
        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
        trace_kmem_cache_alloc_node(_RET_IP_, ret,
-                                   s->objsize, s->size, gfpflags, node);
+                                   s->object_size, s->size, gfpflags, node);
 
        return ret;
 }
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        unsigned long flags = s->flags;
-       unsigned long size = s->objsize;
+       unsigned long size = s->object_size;
        unsigned long align = s->align;
        int order;
 
         * end of the object and the free pointer. If not then add an
         * additional word to have some bytes to store Redzone information.
         */
-       if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+       if ((flags & SLAB_RED_ZONE) && size == s->object_size)
                size += sizeof(void *);
 #endif
 
         * user specified and the dynamic determination of cache line size
         * on bootup.
         */
-       align = calculate_alignment(flags, align, s->objsize);
+       align = calculate_alignment(flags, align, s->object_size);
        s->align = align;
 
        /*
        memset(s, 0, kmem_size);
        s->name = name;
        s->ctor = ctor;
-       s->objsize = size;
+       s->object_size = size;
        s->align = align;
        s->flags = kmem_cache_flags(size, flags, name, ctor);
        s->reserved = 0;
                 * Disable debugging flags that store metadata if the min slab
                 * order increased.
                 */
-               if (get_order(s->size) > get_order(s->objsize)) {
+               if (get_order(s->size) > get_order(s->object_size)) {
                        s->flags &= ~DEBUG_METADATA_FLAGS;
                        s->offset = 0;
                        if (!calculate_sizes(s, -1))
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
 {
-       return s->objsize;
+       return s->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
 
                if (s && s->size) {
                        char *name = kasprintf(GFP_NOWAIT,
-                                "dma-kmalloc-%d", s->objsize);
+                                "dma-kmalloc-%d", s->object_size);
 
                        BUG_ON(!name);
                        kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-                               s->objsize, SLAB_CACHE_DMA);
+                               s->object_size, SLAB_CACHE_DMA);
                }
        }
 #endif
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
                 */
-               s->objsize = max(s->objsize, (int)size);
+               s->object_size = max(s->object_size, (int)size);
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
                if (sysfs_slab_alias(s, name)) {
 
 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", s->objsize);
+       return sprintf(buf, "%d\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);
 
 static void print_slabinfo_header(struct seq_file *m)
 {
        seq_puts(m, "slabinfo - version: 2.1\n");
-       seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
+       seq_puts(m, "# name            <active_objs> <num_objs> <object_size> "
                 "<objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");