*        kvm_pgtable_prot format.
  */
 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
+
+/**
+ * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
+ *
+ * @mmu:       Stage-2 KVM MMU struct
+ * @addr:      Base intermediate physical address (IPA) of the range to invalidate
+ * @size:      Size, in bytes, of the range to invalidate
+ */
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+                               phys_addr_t addr, size_t size);
 #endif /* __ARM64_KVM_PGTABLE_H__ */
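For illustration, a typical consumer of this helper would be KVM's remote-TLB-flush path. A minimal sketch of such a call site, assuming a kvm_arch_flush_remote_tlbs_range() hook that is not part of this hunk:

/*
 * Hypothetical call site: flush the stage-2 TLB entries backing a guest
 * frame range, converting the GFN base and page count into an IPA base
 * and a size in bytes.
 */
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
        kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
                                 gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
        return 0;
}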
 
        return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
 }
 
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+                               phys_addr_t addr, size_t size)
+{
+       unsigned long pages, inval_pages;
+
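+       /* Without FEAT_TLBIRANGE, fall back to invalidating the whole VMID. */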
+       if (!system_supports_tlb_range()) {
+               kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+               return;
+       }
+
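+       /* Convert to pages and invalidate in MAX_TLBI_RANGE_PAGES-sized chunks. */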
+       pages = size >> PAGE_SHIFT;
+       while (pages > 0) {
+               inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
+               kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
+
+               addr += inval_pages << PAGE_SHIFT;
+               pages -= inval_pages;
+       }
+}
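To make the chunking arithmetic concrete: with 4 KiB pages, a 16 GiB range covers 4,194,304 pages, so the loop above issues two range hypercalls of MAX_TLBI_RANGE_PAGES each. A standalone sketch of the same loop, with the page shift and the MAX_TLBI_RANGE_PAGES value (__TLBI_RANGE_PAGES(31, 3) on arm64) assumed rather than taken from the kernel headers:

#include <stdio.h>

#define DEMO_PAGE_SHIFT         12              /* assumed 4 KiB pages */
#define DEMO_MAX_RANGE_PAGES    (32UL << 16)    /* assumed __TLBI_RANGE_PAGES(31, 3) */

/* Mirror of the chunking loop: one printf per emulated range hypercall. */
static void demo_flush_range(unsigned long addr, unsigned long size)
{
        unsigned long pages = size >> DEMO_PAGE_SHIFT;
        unsigned long inval_pages;

        while (pages > 0) {
                inval_pages = pages < DEMO_MAX_RANGE_PAGES ? pages : DEMO_MAX_RANGE_PAGES;
                printf("flush %lu pages at IPA 0x%lx\n", inval_pages, addr);
                addr += inval_pages << DEMO_PAGE_SHIFT;
                pages -= inval_pages;
        }
}

int main(void)
{
        demo_flush_range(0x80000000UL, 16UL << 30);     /* 16 GiB -> two chunks */
        return 0;
}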
+
 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
 
 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,