gpte_to_gfn(gpte), pfn, true, true);
 }
 
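+/*
+ * Re-read the guest pte for the given level and check whether it still
+ * matches the value the walker cached in gw->ptes.
+ */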
+static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
+                               struct guest_walker *gw, int level)
+{
+       int r;
+       pt_element_t curr_pte;
+
+       r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 1],
+                                 &curr_pte, sizeof(curr_pte));
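+       /* A failed read is treated as a change so the caller backs out. */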
+       return r || curr_pte != gw->ptes[level - 1];
+}
+
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
        u64 *sptep = NULL;
        int direct;
        gfn_t table_gfn;
-       int r;
        int level;
        bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
        unsigned direct_access;
-       pt_element_t curr_pte;
        struct kvm_shadow_walk_iterator iterator;
 
        if (!is_present_gpte(gw->ptes[gw->level - 1]))
                }
                sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               direct, access, sptep);
-               if (!direct) {
-                       r = kvm_read_guest_atomic(vcpu->kvm,
-                                                 gw->pte_gpa[level - 2],
-                                                 &curr_pte, sizeof(curr_pte));
-                       if (r || curr_pte != gw->ptes[level - 2]) {
+               if (!direct)
+                       /*
+                        * Verify that the gpte in the page we've just
+                        * write-protected is still there.
+                        */
+                       if (FNAME(gpte_changed)(vcpu, gw, level - 1)) {
                                kvm_mmu_put_page(sp, sptep);
                                kvm_release_pfn_clean(pfn);
                                sptep = NULL;
                                break;
                        }
-               }
 
                link_shadow_page(sptep, sp);
        }