        return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-       list_del(&sp->oos_link);
-       --kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
-       if (sp->global)
-               kvm_unlink_unsync_global(kvm, sp);
        --kvm->stat.mmu_unsync;
 }
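
For reference, a minimal userspace sketch of the accounting that is left after this hunk, assuming nothing beyond what the diff shows: an out-of-sync shadow page is tracked only by its own unsync flag and the single VM-wide mmu_unsync counter, with no extra list or counter for global pages. The toy_* names are invented for the illustration and this is not kernel code; the assert mirrors the WARN_ON(!sp->unsync) check above.

/* Toy model of the remaining unsync bookkeeping (userspace, C99). */
#include <assert.h>
#include <stdio.h>

struct toy_sp {
	int unsync;			/* shadow entries may be stale */
};

struct toy_vm {
	unsigned long mmu_unsync;	/* how many pages are unsync   */
};

static void toy_unsync_page(struct toy_vm *kvm, struct toy_sp *sp)
{
	kvm->mmu_unsync++;
	sp->unsync = 1;
}

static void toy_unlink_unsync_page(struct toy_vm *kvm, struct toy_sp *sp)
{
	assert(sp->unsync);		/* mirrors WARN_ON(!sp->unsync) */
	sp->unsync = 0;
	kvm->mmu_unsync--;
}

int main(void)
{
	struct toy_vm kvm = { 0 };
	struct toy_sp sp = { 0 };

	toy_unsync_page(&kvm, &sp);
	toy_unlink_unsync_page(&kvm, &sp);
	printf("unsync pages: %lu\n", kvm.mmu_unsync);
	return 0;
}
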
 
        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
-       sp->global = 0;
        hlist_add_head(&sp->hash_link, bucket);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
 
-       if (sp->global) {
-               list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-               ++vcpu->kvm->stat.mmu_unsync_global;
-       } else
-               kvm_mmu_mark_parents_unsync(vcpu, sp);
+       kvm_mmu_mark_parents_unsync(vcpu, sp);
 
        mmu_convert_notrap(sp);
        return 0;
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   int global, gfn_t gfn, pfn_t pfn, bool speculative,
+                   gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
        u64 spte;
        int ret = 0;
        u64 mt_mask = shadow_mt_mask;
-       struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-       if (!global && sp->global) {
-               sp->global = 0;
-               if (sp->unsync) {
-                       kvm_unlink_unsync_global(vcpu->kvm, sp);
-                       kvm_mmu_mark_parents_unsync(vcpu, sp);
-               }
-       }
 
        /*
         * We don't set the accessed bit, since we sometimes want to see
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, int global,
-                        gfn_t gfn, pfn_t pfn, bool speculative)
+                        int *ptwrite, int largepage, gfn_t gfn,
+                        pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
                        was_rmapped = 1;
        }
        if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, global, gfn, pfn, speculative, true)) {
+                     dirty, largepage, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
                    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
-                                    largepage, 0, gfn, pfn, false);
+                                    largepage, gfn, pfn, false);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
        }
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_mmu_page *sp, *n;
-
-       list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-               kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_sync_global(vcpu);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
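
For reference, a minimal userspace sketch of the mechanism that remains once the global out-of-sync list is gone, with the same caveat that the toy_* names are invented and this is not the kernel's data structures: unsyncing a page marks its parent chain, and a later pass (as kvm_mmu_sync_roots does above) walks down from the root and resyncs whatever was marked.

/* Toy model of mark-parents-unsync plus a root-down sync walk (userspace, C99). */
#include <stdio.h>

#define MAX_CHILDREN 4

struct toy_sp {
	const char *name;
	struct toy_sp *parent;
	struct toy_sp *child[MAX_CHILDREN];
	int unsync;		/* this page's sptes may be stale */
	int unsync_children;	/* some page below needs a resync */
};

static void toy_mark_parents_unsync(struct toy_sp *sp)
{
	for (struct toy_sp *p = sp->parent; p; p = p->parent)
		p->unsync_children = 1;
}

static void toy_unsync_page(struct toy_sp *sp)
{
	sp->unsync = 1;
	toy_mark_parents_unsync(sp);
}

/* Roughly what a sync pass does: walk down, resync, clear the flags. */
static void toy_sync_children(struct toy_sp *sp)
{
	for (int i = 0; i < MAX_CHILDREN; i++) {
		struct toy_sp *c = sp->child[i];

		if (!c)
			continue;
		if (c->unsync_children)
			toy_sync_children(c);
		if (c->unsync) {
			printf("resync %s\n", c->name);
			c->unsync = 0;
		}
	}
	sp->unsync_children = 0;
}

int main(void)
{
	struct toy_sp root = { .name = "root" };
	struct toy_sp dir  = { .name = "dir",  .parent = &root };
	struct toy_sp leaf = { .name = "leaf", .parent = &dir };

	root.child[0] = &dir;
	dir.child[0] = &leaf;

	toy_unsync_page(&leaf);		/* e.g. guest wrote its page table */
	toy_sync_children(&root);	/* e.g. on the next root switch    */
	return 0;
}
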
 
        kvm_get_pfn(pfn);
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, largepage,
-                    gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
-                    pfn, true);
+                    gpte_to_gfn(gpte), pfn, true);
 }
 
 /*
                                     user_fault, write_fault,
                                     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
                                     ptwrite, largepage,
-                                    gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
                                     gw->gfn, pfn, false);
                        break;
                }
                nr_present++;
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-                        is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
+                        is_dirty_pte(gpte), 0, gfn,
                         spte_to_pfn(sp->spt[i]), true, false);
        }
 
 
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
-       { "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;
 
-       kvm_mmu_sync_global(vcpu);
        kvm_mmu_reset_context(vcpu);
        return;
 }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
-       kvm_mmu_sync_global(vcpu);
        kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
                return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-       INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */