www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Pass walk flags to kvm_pgtable_stage2_mkyoung
author: Quentin Perret <qperret@google.com>
Wed, 18 Dec 2024 19:40:46 +0000 (19:40 +0000)
committer: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 09:43:59 +0000 (09:43 +0000)
kvm_pgtable_stage2_mkyoung currently assumes that it is being called
from a 'shared' walker, which will not be true once called from pKVM.
To allow for the re-use of that function, make the walk flags one of
its parameters.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20241218194059.3670226-6-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/mmu.c

index aab04097b5054a8a48a237d8e79971f57034c7e1..38b7ec1c86141436a9cf698197647a72c042f46f 100644 (file)
@@ -669,13 +669,15 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
  * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
  * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:      Intermediate physical address to identify the page-table entry.
+ * @flags:     Flags to control the page-table walk (ex. a shared walk)
  *
  * The offset of @addr within a page is ignored.
  *
  * If there is a valid, leaf page-table entry used to translate @addr, then
  * set the access flag in that entry.
  */
-void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
+void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+                               enum kvm_pgtable_walk_flags flags);
 
 /**
  * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
index 40bd559665404d0b971314888dd0842179813d1c..0470aedb4bf440d0086ebd69b42397ae90cd8d25 100644 (file)
@@ -1245,14 +1245,13 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
                                        NULL, NULL, 0);
 }
 
-void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
+void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+                               enum kvm_pgtable_walk_flags flags)
 {
        int ret;
 
        ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
-                                      NULL, NULL,
-                                      KVM_PGTABLE_WALK_HANDLE_FAULT |
-                                      KVM_PGTABLE_WALK_SHARED);
+                                      NULL, NULL, flags);
        if (!ret)
                dsb(ishst);
 }
index c9d46ad57e52d3e06b58089dfe4a7838165ddf2d..a2339b76c826f49d2a9b721a0be964c6b34f5101 100644 (file)
@@ -1718,13 +1718,14 @@ out_unlock:
 /* Resolve the access fault by making the page young again. */
 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
+       enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
        struct kvm_s2_mmu *mmu;
 
        trace_kvm_access_fault(fault_ipa);
 
        read_lock(&vcpu->kvm->mmu_lock);
        mmu = vcpu->arch.hw_mmu;
-       kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
+       kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa, flags);
        read_unlock(&vcpu->kvm->mmu_lock);
 }