mm-introduce-memdesc_flags_t-fix
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Mon, 18 Aug 2025 12:42:22 +0000 (08:42 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 12 Sep 2025 00:25:06 +0000 (17:25 -0700)
s/flags/flags.f/ in several architectures
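
The change itself is mechanical: the bare "unsigned long flags" word in
struct page and struct folio is now wrapped in memdesc_flags_t, so the
bit helpers, which take a plain unsigned long *, have to name the .f
member explicitly.  A minimal sketch of the assumed wrapper (the real
definition comes with the parent "mm: introduce memdesc_flags_t" patch
that this fixes up):

	/* assumed shape, for illustration only */
	typedef struct {
		unsigned long f;		/* the flags word */
	} memdesc_flags_t;

	struct folio {
		memdesc_flags_t flags;		/* was: unsigned long flags */
		/* ... */
	};

	/* callers spell out the member when a word pointer is needed */
	clear_bit(PG_dcache_clean, &folio->flags.f);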

Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
35 files changed:
arch/arc/mm/cache.c
arch/arc/mm/tlb.c
arch/arm/include/asm/hugetlb.h
arch/arm/mm/copypage-v4mc.c
arch/arm/mm/copypage-v6.c
arch/arm/mm/copypage-xscale.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault-armv.c
arch/arm/mm/flush.c
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/mte.h
arch/arm64/mm/flush.c
arch/csky/abiv1/cacheflush.c
arch/nios2/mm/cacheflush.c
arch/openrisc/include/asm/cacheflush.h
arch/openrisc/mm/cache.c
arch/parisc/kernel/cache.c
arch/powerpc/include/asm/cacheflush.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/pgtable.c
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/hugetlb.h
arch/riscv/mm/cacheflush.c
arch/s390/include/asm/hugetlb.h
arch/s390/kernel/uv.c
arch/s390/mm/gmap.c
arch/s390/mm/hugetlbpage.c
arch/sh/include/asm/hugetlb.h
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache-sh7705.c
arch/sh/mm/cache.c
arch/sh/mm/kmap.c
arch/sparc/mm/init_64.c
arch/xtensa/mm/cache.c

diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9106ceac323c43559bed01ce3697e124dc3c5e49..7d2f93dc1e918da97fab70f78a496555453f2cd6 100644
@@ -704,7 +704,7 @@ static inline void arc_slc_enable(void)
 
 void flush_dcache_folio(struct folio *folio)
 {
-       clear_bit(PG_dc_clean, &folio->flags);
+       clear_bit(PG_dc_clean, &folio->flags.f);
        return;
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -889,8 +889,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 
        copy_page(kto, kfrom);
 
-       clear_bit(PG_dc_clean, &dst->flags);
-       clear_bit(PG_dc_clean, &src->flags);
+       clear_bit(PG_dc_clean, &dst->flags.f);
+       clear_bit(PG_dc_clean, &src->flags.f);
 
        kunmap_atomic(kto);
        kunmap_atomic(kfrom);
@@ -900,7 +900,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 {
        struct folio *folio = page_folio(page);
        clear_page(to);
-       clear_bit(PG_dc_clean, &folio->flags);
+       clear_bit(PG_dc_clean, &folio->flags.f);
 }
 EXPORT_SYMBOL(clear_user_page);
 
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index cae4a7aae0ed4e186addda91d54b02153ca7181c..ed6915ba76ec46460f605b7e8068908a1ccc6ac0 100644
@@ -488,7 +488,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
         */
        if (vma->vm_flags & VM_EXEC) {
                struct folio *folio = page_folio(page);
-               int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+               int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
                if (dirty) {
                        unsigned long offset = offset_in_folio(folio, paddr);
                        nr = folio_nr_pages(folio);
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index b766c4b373f647385680e06b4255549df9835ba4..700055b1ccb3a402fec78b381f0e7a1d92abf029 100644
@@ -17,7 +17,7 @@
 
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
-       clear_bit(PG_dcache_clean, &folio->flags);
+       clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7ddd82b9fe8b2f1473e031c0fa8bc1240581c7b5..ed843bb22020b3b7bb1bff392aa2989df3109cbb 100644
@@ -67,7 +67,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
        struct folio *src = page_folio(from);
        void *kto = kmap_atomic(to);
 
-       if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
                __flush_dcache_folio(folio_flush_mapping(src), src);
 
        raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a1a71f36d8502758b4fb1f69ad2e79cf96fd1a5b..0710dba5c0bfc48d4a6d38067f45524feeb86847 100644
@@ -73,7 +73,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long kfrom, kto;
 
-       if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
                __flush_dcache_folio(folio_flush_mapping(src), src);
 
        /* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index f1e29d3e81930d127636e8967d03448fc1cebb0e..e16af68d709fd085b82c4d7c542c3b80d8a36be9 100644
@@ -87,7 +87,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
        struct folio *src = page_folio(from);
        void *kto = kmap_atomic(to);
 
-       if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
                __flush_dcache_folio(folio_flush_mapping(src), src);
 
        raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 88c2d68a69c9ee9b43d2a916444b9b6eaaf601d7..08641a936394ce884a8bf490bdd594bf9015591e 100644
@@ -718,7 +718,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
                        if (size < sz)
                                break;
                        if (!offset)
-                               set_bit(PG_dcache_clean, &folio->flags);
+                               set_bit(PG_dcache_clean, &folio->flags.f);
                        offset = 0;
                        size -= sz;
                        if (!size)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 39fd5df73317856d087ba63410e37810ee0dde90..91e488767783e7fbe38f9041d9625d12f61e01ee 100644
@@ -203,7 +203,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 
        folio = page_folio(pfn_to_page(pfn));
        mapping = folio_flush_mapping(folio);
-       if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
                __flush_dcache_folio(mapping, folio);
        if (mapping) {
                if (cache_is_vivt())
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 5219158d54cf71f32ef5caf58a552cb10148dedc..19470d938b2361312ad36202912e7ffe6fda9119 100644
@@ -304,7 +304,7 @@ void __sync_icache_dcache(pte_t pteval)
        else
                mapping = NULL;
 
-       if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
                __flush_dcache_folio(mapping, folio);
 
        if (pte_exec(pteval))
@@ -343,8 +343,8 @@ void flush_dcache_folio(struct folio *folio)
                return;
 
        if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
-               if (test_bit(PG_dcache_clean, &folio->flags))
-                       clear_bit(PG_dcache_clean, &folio->flags);
+               if (test_bit(PG_dcache_clean, &folio->flags.f))
+                       clear_bit(PG_dcache_clean, &folio->flags.f);
                return;
        }
 
@@ -352,14 +352,14 @@ void flush_dcache_folio(struct folio *folio)
 
        if (!cache_ops_need_broadcast() &&
            mapping && !folio_mapped(folio))
-               clear_bit(PG_dcache_clean, &folio->flags);
+               clear_bit(PG_dcache_clean, &folio->flags.f);
        else {
                __flush_dcache_folio(mapping, folio);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, folio);
                else if (mapping)
                        __flush_icache_all();
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 EXPORT_SYMBOL(flush_dcache_folio);
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2a8155c4a8826c5819a02831530153f7a05e9406..44c1f757bfcf8e4cf987cbb147de7c55eba25e03 100644
@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
 
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
-       clear_bit(PG_dcache_clean, &folio->flags);
+       clear_bit(PG_dcache_clean, &folio->flags.f);
 
 #ifdef CONFIG_ARM64_MTE
        if (system_supports_mte()) {
-               clear_bit(PG_mte_tagged, &folio->flags);
-               clear_bit(PG_mte_lock, &folio->flags);
+               clear_bit(PG_mte_tagged, &folio->flags.f);
+               clear_bit(PG_mte_lock, &folio->flags.f);
        }
 #endif
 }
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 6567df8ec8ca8180f3b7748b57819986cf2e1a9e..3b5069f4683d8767b2779f8b00e44ab1eb186dcc 100644
@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
         * before the page flags update.
         */
        smp_wmb();
-       set_bit(PG_mte_tagged, &page->flags);
+       set_bit(PG_mte_tagged, &page->flags.f);
 }
 
 static inline bool page_mte_tagged(struct page *page)
 {
-       bool ret = test_bit(PG_mte_tagged, &page->flags);
+       bool ret = test_bit(PG_mte_tagged, &page->flags.f);
 
        VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
 
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
 {
        VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
 
-       if (!test_and_set_bit(PG_mte_lock, &page->flags))
+       if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
                return true;
 
        /*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
         * already. Check if the PG_mte_tagged flag has been set or wait
         * otherwise.
         */
-       smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+       smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
 
        return false;
 }
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
         * before the folio flags update.
         */
        smp_wmb();
-       set_bit(PG_mte_tagged, &folio->flags);
+       set_bit(PG_mte_tagged, &folio->flags.f);
 
 }
 
 static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
 {
-       bool ret = test_bit(PG_mte_tagged, &folio->flags);
+       bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
 
        VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
 
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
 {
        VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
 
-       if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+       if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
                return true;
 
        /*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
         * already. Check if the PG_mte_tagged flag has been set or wait
         * otherwise.
         */
-       smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+       smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
 
        return false;
 }
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 013eead9b69506a6f8df43ef34099ab195b3168c..fbf08b543c3fd0a9c133ba0a0fd55d3b08c99f0d 100644
@@ -53,11 +53,11 @@ void __sync_icache_dcache(pte_t pte)
 {
        struct folio *folio = page_folio(pte_page(pte));
 
-       if (!test_bit(PG_dcache_clean, &folio->flags)) {
+       if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
                sync_icache_aliases((unsigned long)folio_address(folio),
                                    (unsigned long)folio_address(folio) +
                                            folio_size(folio));
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -69,8 +69,8 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
  */
 void flush_dcache_folio(struct folio *folio)
 {
-       if (test_bit(PG_dcache_clean, &folio->flags))
-               clear_bit(PG_dcache_clean, &folio->flags);
+       if (test_bit(PG_dcache_clean, &folio->flags.f))
+               clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 EXPORT_SYMBOL(flush_dcache_folio);
 
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 171e8fb32285db54fbba4de654af8f48fc5f2ed0..4bc0aad3cf8a90ad2443cc05a56c9295d9637dd9 100644
@@ -25,12 +25,12 @@ void flush_dcache_folio(struct folio *folio)
        mapping = folio_flush_mapping(folio);
 
        if (mapping && !folio_mapped(folio))
-               clear_bit(PG_dcache_clean, &folio->flags);
+               clear_bit(PG_dcache_clean, &folio->flags.f);
        else {
                dcache_wbinv_all();
                if (mapping)
                        icache_inv_all();
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -56,7 +56,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                return;
 
        folio = page_folio(pfn_to_page(pfn));
-       if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
                dcache_wbinv_all();
 
        if (folio_flush_mapping(folio)) {
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 0ee9c5f02e08ebb57233d7da418f3e74b8abd189..8321182eb9276f9e80f63b7790307a88c0d3a888 100644
@@ -187,7 +187,7 @@ void flush_dcache_folio(struct folio *folio)
 
        /* Flush this page if there are aliases. */
        if (mapping && !mapping_mapped(mapping)) {
-               clear_bit(PG_dcache_clean, &folio->flags);
+               clear_bit(PG_dcache_clean, &folio->flags.f);
        } else {
                __flush_dcache_folio(folio);
                if (mapping) {
@@ -195,7 +195,7 @@ void flush_dcache_folio(struct folio *folio)
                        flush_aliases(mapping, folio);
                        flush_icache_range(start, start + folio_size(folio));
                }
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -227,7 +227,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                return;
 
        folio = page_folio(pfn_to_page(pfn));
-       if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
                __flush_dcache_folio(folio);
 
        mapping = folio_flush_mapping(folio);
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 0e60af486ec15572cc3a4b03095838af976ce638..cd8f971c0feccd05f9fee548fef25d118c7a4bdb 100644
@@ -75,7 +75,7 @@ static inline void sync_icache_dcache(struct page *page)
 
 static inline void flush_dcache_folio(struct folio *folio)
 {
-       clear_bit(PG_dc_clean, &folio->flags);
+       clear_bit(PG_dc_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
 
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 0f265b8e73ec22c48283c3649251315ba5d7f175..f33df46dae4e532f83f559c9fa58cd2e82c3fc5d 100644
@@ -83,7 +83,7 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
 {
        unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
        struct folio *folio = page_folio(pfn_to_page(pfn));
-       int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+       int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
 
        /*
         * Since icaches do not snoop for updated data on OpenRISC, we
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 37ca484cc49511a3876f9d318738075893c10fc3..4c5240d3a3c7cfbee54a96c3f3c4b83fe445b556 100644
@@ -122,10 +122,10 @@ void __update_cache(pte_t pte)
        pfn = folio_pfn(folio);
        nr = folio_nr_pages(folio);
        if (folio_flush_mapping(folio) &&
-           test_bit(PG_dcache_dirty, &folio->flags)) {
+           test_bit(PG_dcache_dirty, &folio->flags.f)) {
                while (nr--)
                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
-               clear_bit(PG_dcache_dirty, &folio->flags);
+               clear_bit(PG_dcache_dirty, &folio->flags.f);
        } else if (parisc_requires_coherency())
                while (nr--)
                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
@@ -481,7 +481,7 @@ void flush_dcache_folio(struct folio *folio)
        pgoff_t pgoff;
 
        if (mapping && !mapping_mapped(mapping)) {
-               set_bit(PG_dcache_dirty, &folio->flags);
+               set_bit(PG_dcache_dirty, &folio->flags.f);
                return;
        }
 
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index f2656774aaa94ef17e5b46db9ba92d0a6138b3fd..1fea42928f641d1d19463b8724467c4ee94ce65f 100644
@@ -40,8 +40,8 @@ static inline void flush_dcache_folio(struct folio *folio)
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
-       if (test_bit(PG_dcache_clean, &folio->flags))
-               clear_bit(PG_dcache_clean, &folio->flags);
+       if (test_bit(PG_dcache_clean, &folio->flags.f))
+               clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ca3829d47ab7397a3bbd78ed07f4e2b221981a57..0953f2daa4660f8d205c3a37b807859ba0dd625f 100644
@@ -939,9 +939,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 
        /* Clear i-cache for new pages */
        folio = page_folio(pfn_to_page(pfn));
-       if (!test_bit(PG_dcache_clean, &folio->flags)) {
+       if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
                flush_dcache_icache_folio(folio);
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 4693c464fc5aff1a00029c9a57f287ff71e6b5c6..3aee3af614af89c442f16605ff96001f345a4da0 100644
@@ -1562,11 +1562,11 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
        folio = page_folio(pte_page(pte));
 
        /* page is dirty */
-       if (!test_bit(PG_dcache_clean, &folio->flags) &&
+       if (!test_bit(PG_dcache_clean, &folio->flags.f) &&
            !folio_test_reserved(folio)) {
                if (trap == INTERRUPT_INST_STORAGE) {
                        flush_dcache_icache_folio(folio);
-                       set_bit(PG_dcache_clean, &folio->flags);
+                       set_bit(PG_dcache_clean, &folio->flags.f);
                } else
                        pp |= HPTE_R_N;
        }
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index dfaa9fd86f7eaac9562607e5f33010eac785d00e..56d7e8960e77d877390d6bcbf0d8a305cc222101 100644
@@ -87,9 +87,9 @@ static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
                struct folio *folio = maybe_pte_to_folio(pte);
                if (!folio)
                        return pte;
-               if (!test_bit(PG_dcache_clean, &folio->flags)) {
+               if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
                        flush_dcache_icache_folio(folio);
-                       set_bit(PG_dcache_clean, &folio->flags);
+                       set_bit(PG_dcache_clean, &folio->flags.f);
                }
        }
        return pte;
@@ -127,13 +127,13 @@ static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
                return pte;
 
        /* If the page clean, we move on */
-       if (test_bit(PG_dcache_clean, &folio->flags))
+       if (test_bit(PG_dcache_clean, &folio->flags.f))
                return pte;
 
        /* If it's an exec fault, we flush the cache and make it clean */
        if (is_exec_fault()) {
                flush_dcache_icache_folio(folio);
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
                return pte;
        }
 
@@ -175,12 +175,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
                goto bail;
 
        /* If the page is already clean, we move on */
-       if (test_bit(PG_dcache_clean, &folio->flags))
+       if (test_bit(PG_dcache_clean, &folio->flags.f))
                goto bail;
 
        /* Clean the page and set PG_dcache_clean */
        flush_dcache_icache_folio(folio);
-       set_bit(PG_dcache_clean, &folio->flags);
+       set_bit(PG_dcache_clean, &folio->flags.f);
 
  bail:
        return pte_mkexec(pte);
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 6086b38d54279d4227c04fe0628f598f9941832f..0092513c3376c56e1211f0f9514d90d1bf436bde 100644
@@ -23,8 +23,8 @@ static inline void local_flush_icache_range(unsigned long start,
 
 static inline void flush_dcache_folio(struct folio *folio)
 {
-       if (test_bit(PG_dcache_clean, &folio->flags))
-               clear_bit(PG_dcache_clean, &folio->flags);
+       if (test_bit(PG_dcache_clean, &folio->flags.f))
+               clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 4461264977684b1f306a3435a5533b64dbd366a1..0872d43fc0c02f0dc0586214ac927ffee880b453 100644
@@ -7,7 +7,7 @@
 
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
-       clear_bit(PG_dcache_clean, &folio->flags);
+       clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 4ca5aafce22eb5fdad36d73f699a5780fb4ad564..d83a612464f6cddf04dc91d34e40173a84ea2c9a 100644
@@ -101,9 +101,9 @@ void flush_icache_pte(struct mm_struct *mm, pte_t pte)
 {
        struct folio *folio = page_folio(pte_page(pte));
 
-       if (!test_bit(PG_dcache_clean, &folio->flags)) {
+       if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
                flush_icache_mm(mm, false);
-               set_bit(PG_dcache_clean, &folio->flags);
+               set_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 #endif /* CONFIG_MMU */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 931fcc413598d2705960b83ddf6bd526e7720157..69131736daaa7a47a28765134d45af1011113978 100644
@@ -39,7 +39,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
-       clear_bit(PG_arch_1, &folio->flags);
+       clear_bit(PG_arch_1, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 47f574cd1728a0fffa972ad0adc8f75189ce7d44..93b2a01bae4091fb248011c3a5043d614b186f9e 100644
@@ -144,7 +144,7 @@ int uv_destroy_folio(struct folio *folio)
        folio_get(folio);
        rc = uv_destroy(folio_to_phys(folio));
        if (!rc)
-               clear_bit(PG_arch_1, &folio->flags);
+               clear_bit(PG_arch_1, &folio->flags.f);
        folio_put(folio);
        return rc;
 }
@@ -193,7 +193,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
        folio_get(folio);
        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc)
-               clear_bit(PG_arch_1, &folio->flags);
+               clear_bit(PG_arch_1, &folio->flags.f);
        folio_put(folio);
        return rc;
 }
@@ -289,7 +289,7 @@ static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
        expected = expected_folio_refs(folio) + 1;
        if (!folio_ref_freeze(folio, expected))
                return -EBUSY;
-       set_bit(PG_arch_1, &folio->flags);
+       set_bit(PG_arch_1, &folio->flags.f);
        /*
         * If the UVC does not succeed or fail immediately, we don't want to
         * loop for long, or we might get stall notifications.
@@ -483,18 +483,18 @@ int arch_make_folio_accessible(struct folio *folio)
         *    convert_to_secure.
         * As secure pages are never large folios, both variants can co-exists.
         */
-       if (!test_bit(PG_arch_1, &folio->flags))
+       if (!test_bit(PG_arch_1, &folio->flags.f))
                return 0;
 
        rc = uv_pin_shared(folio_to_phys(folio));
        if (!rc) {
-               clear_bit(PG_arch_1, &folio->flags);
+               clear_bit(PG_arch_1, &folio->flags.f);
                return 0;
        }
 
        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc) {
-               clear_bit(PG_arch_1, &folio->flags);
+               clear_bit(PG_arch_1, &folio->flags.f);
                return 0;
        }
 
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index c7defe4ed1f6c6d7bf3ab3c8bb8cec88fb5ffd2e..8ff6bba107e8880f5b453518188a778b6a499609 100644
@@ -2272,7 +2272,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
        start = pmd_val(*pmd) & HPAGE_MASK;
        end = start + HPAGE_SIZE;
        __storage_key_init_range(start, end);
-       set_bit(PG_arch_1, &folio->flags);
+       set_bit(PG_arch_1, &folio->flags.f);
        cond_resched();
        return 0;
 }
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e88c02c9e642916edda1960aa9af62bedafbc502..72e8fa136af56cdf37ee014bded61c20424bad06 100644
@@ -155,7 +155,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
                paddr = rste & PMD_MASK;
        }
 
-       if (!test_and_set_bit(PG_arch_1, &folio->flags))
+       if (!test_and_set_bit(PG_arch_1, &folio->flags.f))
                __storage_key_init_range(paddr, paddr + size);
 }
 
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index 4a92e6e4d627e99f75e78f3b44fb6bd0c58d6a37..974512f359f0d6914f0c81e1c26cbd4f6c30d833 100644
@@ -14,7 +14,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
-       clear_bit(PG_dcache_clean, &folio->flags);
+       clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 46393b00137e0bfd4a1cdc1b7538fb71c9ce6cf4..83fb34b39ca7c15b4ddd0508727e6b4281d3dcd0 100644
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_folio(void *arg)
        struct address_space *mapping = folio_flush_mapping(folio);
 
        if (mapping && !mapping_mapped(mapping))
-               clear_bit(PG_dcache_clean, &folio->flags);
+               clear_bit(PG_dcache_clean, &folio->flags.f);
        else
 #endif
        {
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index b509a407588fa1729bc9406ee1e541f9162fdf7f..71f8be9fc8e0ccfc04a37b26b00f850c93077f73 100644
@@ -138,7 +138,7 @@ static void sh7705_flush_dcache_folio(void *arg)
        struct address_space *mapping = folio_flush_mapping(folio);
 
        if (mapping && !mapping_mapped(mapping))
-               clear_bit(PG_dcache_clean, &folio->flags);
+               clear_bit(PG_dcache_clean, &folio->flags.f);
        else {
                unsigned long pfn = folio_pfn(folio);
                unsigned int i, nr = folio_nr_pages(folio);
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 6ebdeaff30212f5cdb08e49cffe89dfd55b908dd..c3f028bed04977db9a6458dd7cb8289a5fa7f87b 100644
@@ -64,14 +64,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
        struct folio *folio = page_folio(page);
 
        if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-           test_bit(PG_dcache_clean, &folio->flags)) {
+           test_bit(PG_dcache_clean, &folio->flags.f)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
-                       clear_bit(PG_dcache_clean, &folio->flags);
+                       clear_bit(PG_dcache_clean, &folio->flags.f);
        }
 
        if (vma->vm_flags & VM_EXEC)
@@ -85,14 +85,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
        struct folio *folio = page_folio(page);
 
        if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-           test_bit(PG_dcache_clean, &folio->flags)) {
+           test_bit(PG_dcache_clean, &folio->flags.f)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
-                       clear_bit(PG_dcache_clean, &folio->flags);
+                       clear_bit(PG_dcache_clean, &folio->flags.f);
        }
 }
 
@@ -105,7 +105,7 @@ void copy_user_highpage(struct page *to, struct page *from,
        vto = kmap_atomic(to);
 
        if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
-           test_bit(PG_dcache_clean, &src->flags)) {
+           test_bit(PG_dcache_clean, &src->flags.f)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
@@ -148,7 +148,7 @@ void __update_cache(struct vm_area_struct *vma,
 
        if (pfn_valid(pfn)) {
                struct folio *folio = page_folio(pfn_to_page(pfn));
-               int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
+               int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f);
                if (dirty)
                        __flush_purge_region(folio_address(folio),
                                                folio_size(folio));
@@ -162,7 +162,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-                   test_bit(PG_dcache_clean, &folio->flags)) {
+                   test_bit(PG_dcache_clean, &folio->flags.f)) {
                        void *kaddr;
 
                        kaddr = kmap_coherent(page, vmaddr);
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index fa50e8f6e7a91bcbd51e1a8ea458486ec3107156..c9f32d5a54b8412f7567e4535c07fb169c8e6a0b 100644
@@ -31,7 +31,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
        enum fixed_addresses idx;
        unsigned long vaddr;
 
-       BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));
+       BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f));
 
        preempt_disable();
        pagefault_disable();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 7ed58bf3aacaa46e5300bc743e9b73c881fa56cd..df9f7c444c396d77f32578a9bafb8fb6ec194189 100644
@@ -224,7 +224,7 @@ inline void flush_dcache_folio_impl(struct folio *folio)
        ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
 
 #define dcache_dirty_cpu(folio) \
-       (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+       (((folio)->flags.f >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 
 static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
 {
@@ -243,7 +243,7 @@ static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop"
                             : /* no outputs */
-                            : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags)
+                            : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags.f)
                             : "g1", "g7");
 }
 
@@ -265,7 +265,7 @@ static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu
                             " nop\n"
                             "2:"
                             : /* no outputs */
-                            : "r" (cpu), "r" (mask), "r" (&folio->flags),
+                            : "r" (cpu), "r" (mask), "r" (&folio->flags.f),
                               "i" (PG_dcache_cpu_mask),
                               "i" (PG_dcache_cpu_shift)
                             : "g1", "g7");
@@ -292,7 +292,7 @@ static void flush_dcache(unsigned long pfn)
                struct folio *folio = page_folio(page);
                unsigned long pg_flags;
 
-               pg_flags = folio->flags;
+               pg_flags = folio->flags.f;
                if (pg_flags & (1UL << PG_dcache_dirty)) {
                        int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
                                   PG_dcache_cpu_mask);
@@ -480,7 +480,7 @@ void flush_dcache_folio(struct folio *folio)
 
        mapping = folio_flush_mapping(folio);
        if (mapping && !mapping_mapped(mapping)) {
-               bool dirty = test_bit(PG_dcache_dirty, &folio->flags);
+               bool dirty = test_bit(PG_dcache_dirty, &folio->flags.f);
                if (dirty) {
                        int dirty_cpu = dcache_dirty_cpu(folio);
 
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 23be0e7516cede9446c2d446740188c510b034e1..5354df52d61f2fe11e3ac84702f7ceacdb52a231 100644
@@ -134,8 +134,8 @@ void flush_dcache_folio(struct folio *folio)
         */
 
        if (mapping && !mapping_mapped(mapping)) {
-               if (!test_bit(PG_arch_1, &folio->flags))
-                       set_bit(PG_arch_1, &folio->flags);
+               if (!test_bit(PG_arch_1, &folio->flags.f))
+                       set_bit(PG_arch_1, &folio->flags.f);
                return;
 
        } else {
@@ -232,7 +232,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-       if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+       if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags.f)) {
                unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
                unsigned long tmp;
 
@@ -247,10 +247,10 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                }
                preempt_enable();
 
-               clear_bit(PG_arch_1, &folio->flags);
+               clear_bit(PG_arch_1, &folio->flags.f);
        }
 #else
-       if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
+       if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags.f)
            && (vma->vm_flags & VM_EXEC) != 0) {
                for (i = 0; i < nr; i++) {
                        void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
@@ -258,7 +258,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                        __invalidate_icache_page((unsigned long)paddr);
                        kunmap_local(paddr);
                }
-               set_bit(PG_arch_1, &folio->flags);
+               set_bit(PG_arch_1, &folio->flags.f);
        }
 #endif
 }