#define __ASM_HUGETLB_H
 
 #include <asm/cacheflush.h>
+#include <asm/mte.h>
 #include <asm/page.h>
 
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
        clear_bit(PG_dcache_clean, &folio->flags);
+
+#ifdef CONFIG_ARM64_MTE
+       if (system_supports_mte()) {
+               clear_bit(PG_mte_tagged, &folio->flags);
+               clear_bit(PG_mte_lock, &folio->flags);
+       }
+#endif
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
 
         * backed by tags-capable memory. The vm_flags may be overridden by a
         * filesystem supporting MTE (RAM-based).
         */
-       if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+       if (system_supports_mte() &&
+           (flags & (MAP_ANONYMOUS | MAP_HUGETLB)))
                return VM_MTE_ALLOWED;
 
        return 0;
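
With MAP_HUGETLB now accepted alongside MAP_ANONYMOUS, userspace can ask for PROT_MTE directly at mmap() time on anonymous huge page mappings. A minimal userspace sketch of the newly-allowed call (illustrative only, not part of the patch; it assumes an MTE-capable CPU, a kernel with this series applied, and 2 MiB huge pages reserved via /proc/sys/vm/nr_hugepages):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20   /* arm64-specific, from <asm/mman.h> */
    #endif

    int main(void)
    {
            /* Opt in to synchronous MTE tag-check faults for this task. */
            if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                      (0xfffeUL << PR_MTE_TAG_SHIFT), 0, 0, 0))
                    return 1;

            /* Before this change, PROT_MTE here failed with EINVAL. */
            void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE | PROT_MTE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            printf("MTE-enabled huge page mapping at %p\n", p);
            return 0;
    }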
 
 
 static inline void set_page_mte_tagged(struct page *page)
 {
+       VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
        /*
         * Ensure that the tags written prior to this function are visible
         * before the page flags update.
 {
        bool ret = test_bit(PG_mte_tagged, &page->flags);
 
+       VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
        /*
         * If the page is tagged, ensure ordering with a likely subsequent
         * read of the tags.
  */
 static inline bool try_page_mte_tagging(struct page *page)
 {
+       VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
        if (!test_and_set_bit(PG_mte_lock, &page->flags))
                return true;
 
 
 #endif /* CONFIG_ARM64_MTE */
 
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_ARM64_MTE)
+static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
+{
+       VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+       /*
+        * Ensure that the tags written prior to this function are visible
+        * before the folio flags update.
+        */
+       smp_wmb();
+       set_bit(PG_mte_tagged, &folio->flags);
+}
+
+static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
+{
+       bool ret = test_bit(PG_mte_tagged, &folio->flags);
+
+       VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+       /*
+        * If the folio is tagged, ensure ordering with a likely subsequent
+        * read of the tags.
+        */
+       if (ret)
+               smp_rmb();
+       return ret;
+}
+
+static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
+{
+       VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+       if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+               return true;
+
+       /*
+        * The tags are either being initialised or may already have been
+        * initialised. Check whether the PG_mte_tagged flag has been set,
+        * otherwise wait for it.
+        */
+       smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+
+       return false;
+}
+#else
+static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
+{
+}
+
+static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
+{
+       return false;
+}
+
+static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
+{
+       return false;
+}
+#endif
+
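
To make the barrier pairing in these helpers concrete, a hypothetical caller (not part of the patch; the real users appear in the hunks below) would initialise and publish the tags of a hugetlb folio like this:

    /* Hypothetical initialiser for a hugetlb folio about to be mapped PROT_MTE. */
    static void example_init_hugetlb_tags(struct folio *folio)
    {
            unsigned long i, nr = folio_nr_pages(folio);

            /* The winner of the PG_mte_lock race zeroes every subpage's tags... */
            if (folio_try_hugetlb_mte_tagging(folio)) {
                    for (i = 0; i < nr; i++)
                            mte_clear_page_tags(page_address(folio_page(folio, i)));
                    /* ...then publishes them: smp_wmb() followed by PG_mte_tagged. */
                    folio_set_hugetlb_mte_tagged(folio);
            }
            /*
             * Losers return only after smp_cond_load_acquire() has observed
             * PG_mte_tagged, and any later reader that sees
             * folio_test_hugetlb_mte_tagged() return true (smp_rmb() on the
             * read side) is guaranteed to see fully initialised tags.
             */
    }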
 static inline void mte_disable_tco_entry(struct task_struct *task)
 {
        if (!system_supports_mte())
 
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        struct page *page = pfn_to_online_page(pfn);
+                       struct folio *folio;
 
                        if (!page)
                                continue;
+                       folio = page_folio(page);
+
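+                       /* hugetlb folios track MTE flags on the head page only */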
+                       if (folio_test_hugetlb(folio) &&
+                           !folio_test_hugetlb_mte_tagged(folio))
+                               continue;
 
                        if (!page_mte_tagged(page))
                                continue;
 
 void mte_sync_tags(pte_t pte, unsigned int nr_pages)
 {
        struct page *page = pte_page(pte);
-       unsigned int i;
+       struct folio *folio = page_folio(page);
+       unsigned long i;
+
+       if (folio_test_hugetlb(folio)) {
+               unsigned long nr = folio_nr_pages(folio);
+
+               /* Hugetlb MTE flags are set for head page only */
+               if (folio_try_hugetlb_mte_tagging(folio)) {
+                       for (i = 0; i < nr; i++, page++)
+                               mte_clear_page_tags(page_address(page));
+                       folio_set_hugetlb_mte_tagged(folio);
+               }
+
+               /* ensure the tags are visible before the PTE is set */
+               smp_wmb();
+
+               return;
+       }
 
        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++) {
                void *maddr;
                struct page *page = get_user_page_vma_remote(mm, addr,
                                                             gup_flags, &vma);
+               struct folio *folio;
 
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        put_page(page);
                        break;
                }
-               WARN_ON_ONCE(!page_mte_tagged(page));
+
+               folio = page_folio(page);
+               if (folio_test_hugetlb(folio))
+                       WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
+               else
+                       WARN_ON_ONCE(!page_mte_tagged(page));
 
                /* limit access to the end of the page */
                offset = offset_in_page(addr);
 
                void *maddr;
                unsigned long num_tags;
                struct page *page;
+               struct folio *folio;
 
                if (is_error_noslot_pfn(pfn)) {
                        ret = -EFAULT;
                        ret = -EFAULT;
                        goto out;
                }
+               folio = page_folio(page);
                maddr = page_address(page);
 
                if (!write) {
-                       if (page_mte_tagged(page))
+                       if ((folio_test_hugetlb(folio) &&
+                            folio_test_hugetlb_mte_tagged(folio)) ||
+                            page_mte_tagged(page))
                                num_tags = mte_copy_tags_to_user(tags, maddr,
                                                        MTE_GRANULES_PER_PAGE);
                        else
                         * __set_ptes() in the VMM but still overriding the
                         * tags, hence ignoring the return value.
                         */
-                       try_page_mte_tagging(page);
+                       if (folio_test_hugetlb(folio))
+                               folio_try_hugetlb_mte_tagging(folio);
+                       else
+                               try_page_mte_tagging(page);
                        num_tags = mte_copy_tags_from_user(maddr, tags,
                                                        MTE_GRANULES_PER_PAGE);
 
                        /* uaccess failed, don't leave stale tags */
                        if (num_tags != MTE_GRANULES_PER_PAGE)
                                mte_clear_page_tags(maddr);
-                       set_page_mte_tagged(page);
+                       if (folio_test_hugetlb(folio))
+                               folio_set_hugetlb_mte_tagged(folio);
+                       else
+                               set_page_mte_tagged(page);
 
                        kvm_release_pfn_dirty(pfn);
                }
 
 {
        unsigned long i, nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(pfn);
+       struct folio *folio = page_folio(page);
 
        if (!kvm_has_mte(kvm))
                return;
 
+       if (folio_test_hugetlb(folio)) {
+               /* Hugetlb has MTE flags set on head page only */
+               if (folio_try_hugetlb_mte_tagging(folio)) {
+                       for (i = 0; i < nr_pages; i++, page++)
+                               mte_clear_page_tags(page_address(page));
+                       folio_set_hugetlb_mte_tagged(folio);
+               }
+               return;
+       }
+
        for (i = 0; i < nr_pages; i++, page++) {
                if (try_page_mte_tagging(page)) {
                        mte_clear_page_tags(page_address(page));
 
 {
        void *kto = page_address(to);
        void *kfrom = page_address(from);
+       struct folio *src = page_folio(from);
+       struct folio *dst = page_folio(to);
+       unsigned int i, nr_pages;
 
        copy_page(kto, kfrom);
 
        if (kasan_hw_tags_enabled())
                page_kasan_tag_reset(to);
 
-       if (system_supports_mte() && page_mte_tagged(from)) {
+       if (!system_supports_mte())
+               return;
+
+       if (folio_test_hugetlb(src) &&
+           folio_test_hugetlb_mte_tagged(src)) {
+               if (!folio_try_hugetlb_mte_tagging(dst))
+                       return;
+
+               /*
+                * Populate tags for all subpages.
+                *
+                * Don't assume the first page is the head page, since a
+                * huge page copy may start from any subpage.
+                */
+               nr_pages = folio_nr_pages(src);
+               for (i = 0; i < nr_pages; i++) {
+                       kfrom = page_address(folio_page(src, i));
+                       kto = page_address(folio_page(dst, i));
+                       mte_copy_page_tags(kto, kfrom);
+               }
+               folio_set_hugetlb_mte_tagged(dst);
+       } else if (page_mte_tagged(from)) {
                /* It's a new page, shouldn't have been tagged yet */
                WARN_ON_ONCE(!try_page_mte_tagging(to));
+
                mte_copy_page_tags(kto, kfrom);
                set_page_mte_tagged(to);
        }
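
The "any subpage" caveat reflects how generic code drives copy_highpage(): migration calls it once per subpage via folio_copy(), and the CoW path (process_huge_page() behind copy_user_large_folio(), which lands here through arm64's copy_user_highpage()) starts from a subpage near the faulting address rather than the head. Whichever subpage comes first, the call that wins folio_try_hugetlb_mte_tagging() copies the tags of the whole folio and later calls return early. For orientation only, the generic migration loop looks roughly like this (paraphrased from mm/util.c, not part of the patch):

    /* Roughly mm/util.c:folio_copy(), shown only to illustrate the call pattern. */
    void folio_copy(struct folio *dst, struct folio *src)
    {
            long i = 0;
            long nr = folio_nr_pages(src);

            for (;;) {
                    /* One call per subpage; for a tagged hugetlb source the
                     * branch above copies the whole folio's tags on the
                     * first call and is a no-op on the rest. */
                    copy_highpage(folio_page(dst, i), folio_page(src, i));
                    if (++i == nr)
                            break;
                    cond_resched();
            }
    }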
 
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
-       vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
+       vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND | VM_MTE_ALLOWED);
        vma->vm_ops = &hugetlb_vm_ops;
 
        ret = seal_check_write(info->seals, vma);
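
Because hugetlbfs VMAs now carry VM_MTE_ALLOWED from the start, PROT_MTE can also be applied to file-backed huge page mappings, for example with mprotect(). Another hedged userspace sketch (assumes an MTE-capable system and a hugetlbfs mount at /dev/hugepages; the path and sizes are illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20
    #endif

    int main(void)
    {
            size_t len = 2UL << 20;                 /* one 2 MiB huge page */
            int fd = open("/dev/hugepages/mte-test", O_CREAT | O_RDWR, 0600);

            if (fd < 0 || ftruncate(fd, len))
                    return 1;

            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* Rejected with EINVAL before this hunk; allowed once the VMA is
             * created with VM_MTE_ALLOWED. */
            if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_MTE)) {
                    perror("mprotect(PROT_MTE)");
                    return 1;
            }
            puts("hugetlbfs mapping now accepts MTE tags");
            return 0;
    }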