static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
+
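+/*
+ * Returns true if the backend implements a remote TLB flush over a
+ * guest address range.
+ */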
+static inline bool kvm_available_flush_tlb_with_range(void)
+{
+       return kvm_x86_ops->tlb_remote_flush_with_range;
+}
+
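+/*
+ * Try the range-based remote TLB flush; if it is unavailable or the
+ * backend reports failure, fall back to flushing all remote TLBs.
+ */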
+static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
+               struct kvm_tlb_range *range)
+{
+       int ret = -ENOTSUPP;
+
+       if (range && kvm_available_flush_tlb_with_range())
+               ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+
+       if (ret)
+               kvm_flush_remote_tlbs(kvm);
+}
+
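+/* Convenience wrapper: flush the range described by a start gfn and a page count. */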
+static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
+               u64 start_gfn, u64 pages)
+{
+       struct kvm_tlb_range range;
+
+       range.start_gfn = start_gfn;
+       range.pages = pages;
+
+       kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
        BUG_ON((mmio_mask & mmio_value) != mmio_value);
                        !kvm_is_reserved_pfn(pfn) &&
                        PageTransCompoundMap(pfn_to_page(pfn))) {
                        pte_list_remove(rmap_head, sptep);
-                       need_tlb_flush = 1;
+
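+                       /*
+                        * Flush only the gfn range covered by this huge
+                        * page when a ranged flush is available; otherwise
+                        * request a full TLB flush after the loop.
+                        */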
+                       if (kvm_available_flush_tlb_with_range())
+                               kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
+                                       KVM_PAGES_PER_HPAGE(sp->role.level));
+                       else
+                               need_tlb_flush = 1;
+
                        goto restart;
                }
        }