return flush;
 }
 
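+/*
+ * Clear the D-bit in the spte so that the next write to the page sets it
+ * again and (with PML) re-logs the page as dirty.
+ */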
+static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
+{
+       u64 spte = *sptep;
+
+       rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
+
+       spte &= ~shadow_dirty_mask;
+
+       return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+       u64 *sptep;
+       struct rmap_iterator iter;
+       bool flush = false;
+
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+               flush |= spte_clear_dirty(kvm, sptep);
+               sptep = rmap_get_next(&iter);
+       }
+
+       return flush;
+}
+
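+/*
+ * Set the D-bit in the spte so that the page is treated as already dirty
+ * and further writes to it are not logged.
+ */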
+static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
+{
+       u64 spte = *sptep;
+
+       rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
+
+       spte |= shadow_dirty_mask;
+
+       return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+       u64 *sptep;
+       struct rmap_iterator iter;
+       bool flush = false;
+
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+               flush |= spte_set_dirty(kvm, sptep);
+               sptep = rmap_get_next(&iter);
+       }
+
+       return flush;
+}
+
 /**
  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
  * @kvm: kvm instance
        }
 }
 
+/**
+ * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to clear D-bit
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should clear the D-bit for
+ *
+ * Used for PML to re-log the dirty GPAs after userspace queries dirty_bitmap.
+ */
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+                                    struct kvm_memory_slot *slot,
+                                    gfn_t gfn_offset, unsigned long mask)
+{
+       unsigned long *rmapp;
+
+       while (mask) {
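+               /* the lowest set bit gives the next gfn to process */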
+               rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+                                     PT_PAGE_TABLE_LEVEL, slot);
+               __rmap_clear_dirty(kvm, rmapp);
+
+               /* clear the first set bit */
+               mask &= mask - 1;
+       }
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
+
 /**
  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
  * PT level pages.
                kvm_flush_remote_tlbs(kvm);
 }
 
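+/*
+ * Clear the D-bit on every last-level (4K) spte in the memslot so that
+ * subsequent writes are logged again.  Callers that need large pages
+ * covered as well can pair this with
+ * kvm_mmu_slot_largepage_remove_write_access() below.
+ */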
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot)
+{
+       gfn_t last_gfn;
+       unsigned long *rmapp;
+       unsigned long last_index, index;
+       bool flush = false;
+
+       last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+       spin_lock(&kvm->mmu_lock);
+
+       rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
+       last_index = gfn_to_index(last_gfn, memslot->base_gfn,
+                       PT_PAGE_TABLE_LEVEL);
+
+       for (index = 0; index <= last_index; ++index, ++rmapp) {
+               if (*rmapp)
+                       flush |= __rmap_clear_dirty(kvm, rmapp);
+
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+                       cond_resched_lock(&kvm->mmu_lock);
+       }
+
+       spin_unlock(&kvm->mmu_lock);
+
+       lockdep_assert_held(&kvm->slots_lock);
+
+       /*
+        * It's also safe to flush TLBs out of mmu lock here as currently this
+        * function is only used for dirty logging, in which case flushing TLB
+        * out of mmu lock also guarantees no dirty pages will be lost in
+        * dirty_bitmap.
+        */
+       if (flush)
+               kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
+
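+/*
+ * Write protect every spte above the 4K level in the memslot, so that a
+ * write through a large-page mapping faults and the page can be remapped
+ * at 4K granularity, where the D-bit is tracked.
+ */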
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+                                       struct kvm_memory_slot *memslot)
+{
+       gfn_t last_gfn;
+       int i;
+       bool flush = false;
+
+       last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+       spin_lock(&kvm->mmu_lock);
+
+       for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
+            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+               unsigned long *rmapp;
+               unsigned long last_index, index;
+
+               rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+               last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+               for (index = 0; index <= last_index; ++index, ++rmapp) {
+                       if (*rmapp)
+                               flush |= __rmap_write_protect(kvm, rmapp,
+                                               false);
+
+                       if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+                               cond_resched_lock(&kvm->mmu_lock);
+               }
+       }
+       spin_unlock(&kvm->mmu_lock);
+
+       /* see kvm_mmu_slot_remove_write_access */
+       lockdep_assert_held(&kvm->slots_lock);
+
+       if (flush)
+               kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+
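+/*
+ * Set the D-bit on every spte in the memslot, at all page sizes, so that
+ * writes to the slot no longer generate dirty logging work (e.g. when
+ * dirty logging is being disabled while PML is enabled).
+ */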
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+                           struct kvm_memory_slot *memslot)
+{
+       gfn_t last_gfn;
+       int i;
+       bool flush = false;
+
+       last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+       spin_lock(&kvm->mmu_lock);
+
+       for (i = PT_PAGE_TABLE_LEVEL;
+            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+               unsigned long *rmapp;
+               unsigned long last_index, index;
+
+               rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+               last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+               for (index = 0; index <= last_index; ++index, ++rmapp) {
+                       if (*rmapp)
+                               flush |= __rmap_set_dirty(kvm, rmapp);
+
+                       if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+                               cond_resched_lock(&kvm->mmu_lock);
+               }
+       }
+
+       spin_unlock(&kvm->mmu_lock);
+
+       lockdep_assert_held(&kvm->slots_lock);
+
+       /* see kvm_mmu_slot_leaf_clear_dirty */
+       if (flush)
+               kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
+
 #define BATCH_ZAP_PAGES        10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {