KVM: arm64: Simplify the sanitise_mte_tags() logic
author Catalin Marinas <catalin.marinas@arm.com>
Fri, 4 Nov 2022 01:10:36 +0000 (18:10 -0700)
committer Marc Zyngier <maz@kernel.org>
Tue, 29 Nov 2022 09:26:07 +0000 (09:26 +0000)
Currently sanitise_mte_tags() checks whether the page is online before
attempting to sanitise the tags. Such detection should be done in the
caller via the VM_MTE_ALLOWED vma flag. Since kvm_set_spte_gfn() does
not have the vma, leave the page unmapped if not already tagged. Tag
initialisation will be done on a subsequent access fault in
user_mem_abort().
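
For reference, a condensed sketch of the code paths after this change,
assembled from the hunks below. The tag-clearing call inside the loop falls
between the two hunks and is only hinted at by a comment here, not
reconstructed:

	static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
				      unsigned long size)
	{
		unsigned long i, nr_pages = size >> PAGE_SHIFT;
		struct page *page = pfn_to_page(pfn);

		/* bail out early when the VM does not use MTE */
		if (!kvm_has_mte(kvm))
			return;

		for (i = 0; i < nr_pages; i++, page++) {
			if (!page_mte_tagged(page)) {
				/* ... clear stale tags (elided between hunks) ... */
				set_page_mte_tagged(page);
			}
		}
	}

	/* user_mem_abort(): the VMA-based checks move to the caller */
	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
		if ((vma->vm_flags & VM_MTE_ALLOWED) &&
		    !(vma->vm_flags & VM_SHARED)) {
			sanitise_mte_tags(kvm, pfn, vma_pagesize);
		} else {
			ret = -EFAULT;
			goto out_unlock;
		}
	}

	/* kvm_set_spte_gfn(): leave the page unmapped if not yet tagged */
	if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
		return false;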

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[pcc@google.com: fix the page initializer]
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Peter Collingbourne <pcc@google.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221104011041.290951-4-pcc@google.com
arch/arm64/kvm/mmu.c

index 2c3759f1f2c5691a43ddcfc38f8b488a5c6ce43a..e81bfb7306298dfeee0f60cdbd4326cfe1f7d439 100644
@@ -1091,23 +1091,14 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
  * - mmap_lock protects between a VM faulting a page in and the VMM performing
  *   an mprotect() to add VM_MTE
  */
-static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
-                            unsigned long size)
+static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+                             unsigned long size)
 {
        unsigned long i, nr_pages = size >> PAGE_SHIFT;
-       struct page *page;
+       struct page *page = pfn_to_page(pfn);
 
        if (!kvm_has_mte(kvm))
-               return 0;
-
-       /*
-        * pfn_to_online_page() is used to reject ZONE_DEVICE pages
-        * that may not support tags.
-        */
-       page = pfn_to_online_page(pfn);
-
-       if (!page)
-               return -EFAULT;
+               return;
 
        for (i = 0; i < nr_pages; i++, page++) {
                if (!page_mte_tagged(page)) {
@@ -1115,8 +1106,6 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
                        set_page_mte_tagged(page);
                }
        }
-
-       return 0;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1127,7 +1116,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        bool write_fault, writable, force_pte = false;
        bool exec_fault;
        bool device = false;
-       bool shared;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -1177,8 +1165,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                vma_shift = get_vma_page_shift(vma, hva);
        }
 
-       shared = (vma->vm_flags & VM_SHARED);
-
        switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SHIFT:
@@ -1299,12 +1285,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new VM_SHARED VMA */
-               if (!shared)
-                       ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
-               else
+               if ((vma->vm_flags & VM_MTE_ALLOWED) &&
+                   !(vma->vm_flags & VM_SHARED)) {
+                       sanitise_mte_tags(kvm, pfn, vma_pagesize);
+               } else {
                        ret = -EFAULT;
-               if (ret)
                        goto out_unlock;
+               }
        }
 
        if (writable)
@@ -1526,15 +1513,18 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        kvm_pfn_t pfn = pte_pfn(range->pte);
-       int ret;
 
        if (!kvm->arch.mmu.pgt)
                return false;
 
        WARN_ON(range->end - range->start != 1);
 
-       ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
-       if (ret)
+       /*
+        * If the page isn't tagged, defer to user_mem_abort() for sanitising
+        * the MTE tags. The S2 pte should have been unmapped by
+        * mmu_notifier_invalidate_range_end().
+        */
+       if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
                return false;
 
        /*