* coherent with the kernel's mapping.
         */
        if (!PageHighMem(page)) {
-               size_t page_size = PAGE_SIZE << compound_order(page);
-               __cpuc_flush_dcache_area(page_address(page), page_size);
+               __cpuc_flush_dcache_area(page_address(page), page_size(page));
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
 
        struct page *page = pte_page(pte);
 
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-               sync_icache_aliases(page_address(page),
-                                   PAGE_SIZE << compound_order(page));
+               sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
 
        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */
 
-       flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+       flush_icache_range(addr, addr + page_size(page));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
 }
 
 
                        bool merge;
 
                        if (page)
-                               pg_size <<= compound_order(page);
+                               pg_size = page_size(page);
                        if (off < pg_size &&
                            skb_can_coalesce(skb, i, page, off)) {
                                merge = 1;
                                                           __GFP_NORETRY,
                                                           order);
                                        if (page)
-                                               pg_size <<=
-                                                       compound_order(page);
+                                               pg_size <<= order;
                                }
                                if (!page) {
                                        page = alloc_page(gfp);
 
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
-               size_remaining -= PAGE_SIZE << compound_order(page);
+               size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }
 
        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-               sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+               sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }
 
                                           page, off_in_page, tlen);
                        fr_len(fp) += tlen;
                        fp_skb(fp)->data_len += tlen;
-                       fp_skb(fp)->truesize +=
-                                       PAGE_SIZE << compound_order(page);
+                       fp_skb(fp)->truesize += page_size(page);
                } else {
                        BUG_ON(!page);
                        from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 
        }
 
        page = virt_to_head_page(ptr);
-       if (sz > (PAGE_SIZE << compound_order(page)))
+       if (sz > page_size(page))
                return -EINVAL;
 
        pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
 
 static inline struct hstate *page_hstate(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHuge(page), page);
-       return size_to_hstate(PAGE_SIZE << compound_order(page));
+       return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
 
        page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+       return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
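
For illustration only (not part of the patch): a minimal sketch of how a
caller is expected to use the new helper. The wrapper function and its name
below are made up for this example; page_size(), compound_order(), PAGE_SIZE
and WARN_ON() are the in-tree definitions referenced above.

	#include <linux/mm.h>

	/*
	 * Hypothetical example, not in the tree: report the size in bytes of
	 * a (possibly compound) page, comparing the open-coded expression this
	 * patch removes with the new helper.
	 */
	static unsigned long example_page_bytes(struct page *page)
	{
		unsigned long old_sz = PAGE_SIZE << compound_order(page);
		unsigned long new_sz = page_size(page);

		WARN_ON(old_sz != new_sz);	/* the two forms are equivalent */
		return new_sz;
	}

For an order-0 page both expressions evaluate to PAGE_SIZE; for an order-2
compound page they evaluate to PAGE_SIZE << 2 (16 KiB with 4 KiB pages).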
 
        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;
 
-       if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+       if (likely(n <= v && v <= page_size(head)))
                return true;
        WARN_ON(1);
        return false;
 
 
        for (i = 0; i < (1 << compound_order(page)); i++)
                page_kasan_tag_reset(page + i);
-       kasan_poison_shadow(page_address(page),
-                       PAGE_SIZE << compound_order(page),
+       kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
 }
 
        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
-       redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+       redzone_end = (unsigned long)ptr + page_size(page);
 
        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
-               kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-                               KASAN_FREE_PAGE);
+               kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
 
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
         */
-       return PAGE_SIZE << compound_order(page);
+       return page_size(page);
 }
 
 /**
 
 
        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
-               pvmw->pte = huge_pte_offset(mm, pvmw->address,
-                                           PAGE_SIZE << compound_order(page));
+               pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;
 
 
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                0, vma, vma->vm_mm, address,
-                               min(vma->vm_end, address +
-                                   (PAGE_SIZE << compound_order(page))));
+                               min(vma->vm_end, address + page_size(page)));
        mmu_notifier_invalidate_range_start(&range);
 
        while (page_vma_mapped_walk(&pvmw)) {
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                address,
-                               min(vma->vm_end, address +
-                                   (PAGE_SIZE << compound_order(page))));
+                               min(vma->vm_end, address + page_size(page)));
        if (PageHuge(page)) {
                /*
                 * If sharing is possible, start and end will be adjusted
 
 
        sp = virt_to_page(block);
        if (unlikely(!PageSlab(sp)))
-               return PAGE_SIZE << compound_order(sp);
+               return page_size(sp);
 
        align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        m = (unsigned int *)(block - align);
 
                return 1;
 
        start = page_address(page);
-       length = PAGE_SIZE << compound_order(page);
+       length = page_size(page);
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
        init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static void setup_page_debug(struct kmem_cache *s, struct page *page,
+                             void *addr)
 {
        if (!(s->flags & SLAB_POISON))
                return;
 
        metadata_access_enable();
-       memset(addr, POISON_INUSE, PAGE_SIZE << order);
+       memset(addr, POISON_INUSE, page_size(page));
        metadata_access_disable();
 }
 
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-                       void *addr, int order) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+                       struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }
        struct kmem_cache_order_objects oo = s->oo;
        gfp_t alloc_gfp;
        void *start, *p, *next;
-       int idx, order;
+       int idx;
        bool shuffle;
 
        flags &= gfp_allowed_mask;
 
        page->objects = oo_objects(oo);
 
-       order = compound_order(page);
        page->slab_cache = s;
        __SetPageSlab(page);
        if (page_is_pfmemalloc(page))
 
        start = page_address(page);
 
-       setup_page_debug(s, start, order);
+       setup_page_debug(s, page, start);
 
        shuffle = shuffle_freelist(s, page);
 
 
        if (unlikely(!PageSlab(page))) {
                WARN_ON(!PageCompound(page));
-               return PAGE_SIZE << compound_order(page);
+               return page_size(page);
        }
 
        return slab_ksize(page->slab_cache);
 
        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        qpg = virt_to_head_page(q->ring);
-       if (size > (PAGE_SIZE << compound_order(qpg)))
+       if (size > page_size(qpg))
                return -EINVAL;
 
        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;