        union kvm_mmu_page_role base_role;
        bool direct_map;
 
+       /*
+        * Bitmap; bit set = permission fault
+        * Byte index: page fault error code [4:1]
+        * Bit index: pte permissions in ACC_* format
+        */
+       u8 permissions[16];
+
        u64 *pae_root;
        u64 *lm_root;
        u64 rsvd_bits_mask[2][4];
 
        }
 }
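
For a concrete feel of the indexing described in the comment above, here is a
tiny standalone illustration (the PFERR_*/ACC_* values are copied from the
existing x86 headers; nothing below is part of the patch):

    #include <stdio.h>

    #define PFERR_WRITE_MASK (1U << 1)
    #define PFERR_USER_MASK  (1U << 2)
    #define ACC_EXEC_MASK  1
    #define ACC_WRITE_MASK 2
    #define ACC_USER_MASK  4

    int main(void)
    {
            unsigned pfec = PFERR_USER_MASK | PFERR_WRITE_MASK;             /* user-mode write fault */
            unsigned acc  = ACC_USER_MASK | ACC_WRITE_MASK | ACC_EXEC_MASK; /* rwx user translation  */

            /* the answer for this combination lives in byte 3, bit 7 of mmu->permissions */
            printf("byte %u, bit %u\n", pfec >> 1, acc);
            return 0;
    }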
 
+static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+       unsigned bit, byte, pfec;
+       u8 map;
+       bool fault, x, w, u, wf, uf, ff, smep;
+
+       smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+       for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
+               pfec = byte << 1;
+               map = 0;
+               wf = pfec & PFERR_WRITE_MASK;
+               uf = pfec & PFERR_USER_MASK;
+               ff = pfec & PFERR_FETCH_MASK;
+               for (bit = 0; bit < 8; ++bit) {
+                       x = bit & ACC_EXEC_MASK;
+                       w = bit & ACC_WRITE_MASK;
+                       u = bit & ACC_USER_MASK;
+
+                       /* Not really needed: !nx will cause pte.nx to fault */
+                       x |= !mmu->nx;
+                       /* Allow supervisor writes if !cr0.wp */
+                       w |= !is_write_protection(vcpu) && !uf;
+                       /* Disallow supervisor fetches of user code if cr4.smep */
+                       x &= !(smep && u && !uf);
+
+                       fault = (ff && !x) || (uf && !u) || (wf && !w);
+                       map |= fault << bit;
+               }
+               mmu->permissions[byte] = map;
+       }
+}
+
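
For reference, the table that the loop above produces can be recomputed in
userspace. The sketch below is not kernel code: the guest configuration
(cr0.wp, efer.nx, cr4.smep) is passed in as plain flags rather than read from
the vcpu, the constants mirror the x86 headers, and fill_permissions() is only
a local stand-in for update_permission_bitmask().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Constants mirror the x86 headers; everything else is a toy harness. */
    #define PFERR_WRITE_MASK (1U << 1)
    #define PFERR_USER_MASK  (1U << 2)
    #define PFERR_FETCH_MASK (1U << 4)
    #define ACC_EXEC_MASK  1
    #define ACC_WRITE_MASK 2
    #define ACC_USER_MASK  4

    /* Same double loop as update_permission_bitmask(), with the guest state
     * (cr0.wp, efer.nx, cr4.smep) passed in as plain flags. */
    static void fill_permissions(uint8_t perm[16], bool cr0_wp, bool nx, bool smep)
    {
            unsigned bit, byte, pfec;

            for (byte = 0; byte < 16; ++byte) {
                    uint8_t map = 0;
                    bool wf, uf, ff;

                    pfec = byte << 1;
                    wf = pfec & PFERR_WRITE_MASK;
                    uf = pfec & PFERR_USER_MASK;
                    ff = pfec & PFERR_FETCH_MASK;
                    for (bit = 0; bit < 8; ++bit) {
                            bool x = bit & ACC_EXEC_MASK;
                            bool w = bit & ACC_WRITE_MASK;
                            bool u = bit & ACC_USER_MASK;
                            bool fault;

                            x |= !nx;                 /* no NX: everything executable */
                            w |= !cr0_wp && !uf;      /* wp=0: supervisor writes ignore R/W */
                            x &= !(smep && u && !uf); /* smep: no kernel fetch of user code */

                            fault = (ff && !x) || (uf && !u) || (wf && !w);
                            map |= fault << bit;
                    }
                    perm[byte] = map;
            }
    }

    int main(void)
    {
            uint8_t perm[16];
            unsigned i;

            fill_permissions(perm, true, true, false);      /* wp=1, nx=1, smep=0 */
            for (i = 0; i < 16; ++i)
                    printf("pfec %#04x -> permissions %#04x\n", i << 1, (unsigned)perm[i]);
            return 0;
    }
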
 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
                                        struct kvm_mmu *context,
                                        int level)
        context->root_level = level;
 
        reset_rsvds_bits_mask(vcpu, context);
+       update_permission_bitmask(vcpu, context);
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->root_level = PT32_ROOT_LEVEL;
 
        reset_rsvds_bits_mask(vcpu, context);
+       update_permission_bitmask(vcpu, context);
 
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
                context->gva_to_gpa = paging32_gva_to_gpa;
        }
 
+       update_permission_bitmask(vcpu, context);
+
        return 0;
 }
 
                g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
        }
 
+       update_permission_bitmask(vcpu, g_context);
+
        return 0;
 }
 
 
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
 
-static inline bool check_write_user_access(struct kvm_vcpu *vcpu,
-                                          bool write_fault, bool user_fault,
-                                          unsigned long pte)
+/*
+ * Will a fault with a given page-fault error code (pfec) cause a permission
+ * fault with the given access (in ACC_* format)?
+ */
+static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
+                                   unsigned pfec)
 {
-       if (unlikely(write_fault && !is_writable_pte(pte)
-             && (user_fault || is_write_protection(vcpu))))
-               return false;
-
-       if (unlikely(user_fault && !(pte & PT_USER_MASK)))
-               return false;
-
-       return true;
+       return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
 }
+
 #endif
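
A usage sketch of the new helper, with a toy struct standing in for struct
kvm_mmu and a hand-filled table byte in place of update_permission_bitmask()
(illustration only, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Constants mirror the x86 headers; the struct and fill are made up. */
    #define PFERR_USER_MASK  (1U << 2)
    #define PFERR_FETCH_MASK (1U << 4)
    #define ACC_EXEC_MASK  1
    #define ACC_WRITE_MASK 2
    #define ACC_USER_MASK  4

    struct toy_mmu { uint8_t permissions[16]; };    /* stand-in for struct kvm_mmu */

    static inline int permission_fault(struct toy_mmu *mmu, unsigned pte_access,
                                       unsigned pfec)
    {
            /* same single lookup as the kernel helper above */
            return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
    }

    int main(void)
    {
            struct toy_mmu mmu = { { 0 } };
            unsigned pfec = PFERR_USER_MASK | PFERR_FETCH_MASK;     /* user ifetch */

            /* pretend update_permission_bitmask() marked "user rw, no exec" as faulting */
            mmu.permissions[pfec >> 1] |= 1u << (ACC_USER_MASK | ACC_WRITE_MASK);

            printf("NX page:   %d\n",       /* 1: fetch from a non-executable user page */
                   permission_fault(&mmu, ACC_USER_MASK | ACC_WRITE_MASK, pfec));
            printf("exec page: %d\n",       /* 0: same fetch from an executable page */
                   permission_fault(&mmu, ACC_USER_MASK | ACC_WRITE_MASK | ACC_EXEC_MASK, pfec));
            return 0;
    }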
 
        pt_element_t pte;
        pt_element_t __user *uninitialized_var(ptep_user);
        gfn_t table_gfn;
-       unsigned index, pt_access, uninitialized_var(pte_access);
+       unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;
        bool eperm, last_gpte;
        int offset;
                        goto error;
                }
 
-               if (!check_write_user_access(vcpu, write_fault, user_fault,
-                                         pte))
-                       eperm = true;
-
-#if PTTYPE == 64
-               if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
-                       eperm = true;
-#endif
+               pte_access = pt_access & gpte_access(vcpu, pte);
 
                last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
-               if (last_gpte) {
-                       pte_access = pt_access & gpte_access(vcpu, pte);
-                       /* check if the kernel is fetching from user page */
-                       if (unlikely(pte_access & PT_USER_MASK) &&
-                           kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-                               if (fetch_fault && !user_fault)
-                                       eperm = true;
-               }
 
                walker->ptes[walker->level - 1] = pte;
 
                        break;
                }
 
-               pt_access &= gpte_access(vcpu, pte);
+               pt_access &= pte_access;
                --walker->level;
        }
 
+       eperm |= permission_fault(mmu, pte_access, access);
        if (unlikely(eperm)) {
                errcode |= PFERR_PRESENT_MASK;
                goto error;
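
With the per-level checks gone, the walker only ANDs each level's gpte access
bits into pte_access and leaves the verdict to a single permission_fault()
lookup after the loop. A toy model of that accumulation (toy_gpte_access() and
the gpte values below are made up for the example):

    #include <stdio.h>

    #define ACC_EXEC_MASK  1
    #define ACC_WRITE_MASK 2
    #define ACC_USER_MASK  4
    #define ACC_ALL        (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

    /* Toy reduction of a gpte to ACC_* bits: R/W at bit 1, U/S at bit 2, NX at bit 63. */
    static unsigned toy_gpte_access(unsigned long long gpte)
    {
            unsigned access = ACC_EXEC_MASK;        /* executable unless NX is set */

            if (gpte & (1ULL << 1))
                    access |= ACC_WRITE_MASK;
            if (gpte & (1ULL << 2))
                    access |= ACC_USER_MASK;
            if (gpte & (1ULL << 63))
                    access &= ~ACC_EXEC_MASK;
            return access;
    }

    int main(void)
    {
            /* made-up 2-level walk: writable user PDE, then a read-only user NX PTE */
            unsigned long long gptes[] = { 0x7, (1ULL << 63) | 0x5 };
            unsigned pte_access = ACC_ALL;
            unsigned i;

            for (i = 0; i < 2; ++i)
                    pte_access &= toy_gpte_access(gptes[i]);

            /* result: ACC_USER_MASK only - a write or fetch fault on this mapping
             * would now be answered by one permission_fault(mmu, pte_access, access) */
            printf("pte_access = %#x\n", pte_access);
            return 0;
    }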
 
                                gpa_t *gpa, struct x86_exception *exception,
                                bool write)
 {
-       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
+               | (write ? PFERR_WRITE_MASK : 0);
 
-       if (vcpu_match_mmio_gva(vcpu, gva) &&
-                 check_write_user_access(vcpu, write, access,
-                 vcpu->arch.access)) {
+       if (vcpu_match_mmio_gva(vcpu, gva)
+           && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
                *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
                                        (gva & (PAGE_SIZE - 1));
                trace_vcpu_match_mmio(gva, *gpa, write, false);
                return 1;
        }
 
-       if (write)
-               access |= PFERR_WRITE_MASK;
-
        *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 
        if (*gpa == UNMAPPED_GVA)