        return level;
 }
 
-int kvm_mmu_max_mapping_level(struct kvm *kvm,
-                             const struct kvm_memory_slot *slot, gfn_t gfn,
-                             int max_level)
+static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot,
+                                      gfn_t gfn, int max_level, bool is_private)
 {
        struct kvm_lpage_info *linfo;
        int host_level;
                        break;
        }
 
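+       /*
+        * Private (guest_memfd) memory has no userspace mapping, so
+        * host_pfn_mapping_level() doesn't apply.  The mapping level is
+        * bounded only by lpage_info above and, on the fault path, by the
+        * order of the guest_memfd backing.
+        */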
+       if (is_private)
+               return max_level;
+
        if (max_level == PG_LEVEL_4K)
                return PG_LEVEL_4K;
 
        return min(host_level, max_level);
 }
 
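+/*
+ * Wrapper for callers outside the page fault path, e.g. recovering huge
+ * pages when dirty logging is disabled, that don't have a kvm_page_fault
+ * and so must derive private vs. shared from the gfn's memory attributes.
+ */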
+int kvm_mmu_max_mapping_level(struct kvm *kvm,
+                             const struct kvm_memory_slot *slot, gfn_t gfn,
+                             int max_level)
+{
+       bool is_private = kvm_slot_can_be_private(slot) &&
+                         kvm_mem_is_private(kvm, gfn);
+
+       return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
+}
+
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        struct kvm_memory_slot *slot = fault->slot;
         * Enforce the iTLB multihit workaround after capturing the requested
         * level, which will be used to do precise, accurate accounting.
         */
-       fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
-                                                    fault->gfn, fault->max_level);
+       fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
+                                                      fault->gfn, fault->max_level,
+                                                      fault->is_private);
        if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
                return;
 
        kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
 }
 
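+/*
+ * Convert an allocation order from guest_memfd into the largest page table
+ * level KVM can use to map the page, e.g. an order-9 (2MiB) allocation on
+ * x86 allows at most a 2MiB mapping.
+ */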
+static inline u8 kvm_max_level_for_order(int order)
+{
+       BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
+
+       KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
+                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
+                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
+
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
+               return PG_LEVEL_1G;
+
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+               return PG_LEVEL_2M;
+
+       return PG_LEVEL_4K;
+}
+
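+/*
+ * Prepare a KVM_EXIT_MEMORY_FAULT exit for the faulting page so that
+ * userspace can resolve the private/shared mismatch, e.g. by converting
+ * the memory with KVM_SET_MEMORY_ATTRIBUTES.
+ */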
+static void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+                                             struct kvm_page_fault *fault)
+{
+       kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
+                                     PAGE_SIZE, fault->write, fault->exec,
+                                     fault->is_private);
+}
+
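+/*
+ * Fault in a pfn for a private gpa from guest_memfd.  Exits to userspace
+ * with KVM_EXIT_MEMORY_FAULT if the memslot isn't backed by guest_memfd or
+ * if guest_memfd can't provide a page, otherwise caps the mapping level at
+ * the order of the backing allocation.
+ */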
+static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
+                                  struct kvm_page_fault *fault)
+{
+       int max_order, r;
+
+       if (!kvm_slot_can_be_private(fault->slot)) {
+               kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+               return -EFAULT;
+       }
+
+       r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
+                            &max_order);
+       if (r) {
+               kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+               return r;
+       }
+
+       fault->max_level = min(kvm_max_level_for_order(max_order),
+                              fault->max_level);
+       fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
+
+       return RET_PF_CONTINUE;
+}
+
 static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        struct kvm_memory_slot *slot = fault->slot;
                        return RET_PF_EMULATE;
        }
 
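+       /*
+        * The fault's view of private vs. shared must match the gfn's memory
+        * attributes; on a mismatch, exit to userspace with
+        * KVM_EXIT_MEMORY_FAULT so it can convert the page before retrying.
+        */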
+       if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+               kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+               return -EFAULT;
+       }
+
+       if (fault->is_private)
+               return kvm_faultin_pfn_private(vcpu, fault);
+
        async = false;
        fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
                                          fault->write, &fault->map_writable,
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+                                       struct kvm_gfn_range *range)
+{
+       /*
+        * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
+        * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
+        * can simply ignore such slots.  But if userspace is making memory
+        * PRIVATE, then KVM must prevent the guest from accessing the memory
+        * as shared.  And if userspace is making memory SHARED and this point
+        * is reached, then at least one page within the range was previously
+        * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
+        * Zapping SPTEs in this case ensures KVM will reassess whether or not
+        * a hugepage can be used for affected ranges.
+        */
+       if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+               return false;
+
+       return kvm_unmap_gfn_range(kvm, range);
+}
+
 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
                                int level)
 {