 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |   \
                                  0xFFFFFF0000000000ULL)
 
-#define KVM_GUEST_CR0_MASK                                \
-       (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
-        | X86_CR0_NW | X86_CR0_CD)
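+/*
+ * The *_UNRESTRICTED_GUEST variants leave X86_CR0_PG and X86_CR0_PE under
+ * guest control instead of forcing them on, so an unrestricted guest can
+ * enable and disable paging and protected mode itself.
+ */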
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                          \
+       (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK                                             \
+       (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST                                \
+       (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
 #define KVM_VM_CR0_ALWAYS_ON                                           \
-       (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
-        | X86_CR0_MP)
+       (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_GUEST_CR4_MASK                                             \
        (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 
 static int __read_mostly enable_ept = 1;
 module_param_named(ept, enable_ept, bool, S_IRUGO);
 
+static int __read_mostly enable_unrestricted_guest = 1;
+module_param_named(unrestricted_guest,
+                       enable_unrestricted_guest, bool, S_IRUGO);
+
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
                SECONDARY_EXEC_ENABLE_EPT;
 }
 
+static inline int cpu_has_vmx_unrestricted_guest(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_UNRESTRICTED_GUEST;
+}
+
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
        return flexpriority_enabled &&
                opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                        SECONDARY_EXEC_WBINVD_EXITING |
                        SECONDARY_EXEC_ENABLE_VPID |
-                       SECONDARY_EXEC_ENABLE_EPT;
+                       SECONDARY_EXEC_ENABLE_EPT |
+                       SECONDARY_EXEC_UNRESTRICTED_GUEST;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
        if (!cpu_has_vmx_vpid())
                enable_vpid = 0;
 
-       if (!cpu_has_vmx_ept())
+       if (!cpu_has_vmx_ept()) {
                enable_ept = 0;
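+               /* Unrestricted guest execution requires EPT. */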
+               enable_unrestricted_guest = 0;
+       }
+
+       if (!cpu_has_vmx_unrestricted_guest())
+               enable_unrestricted_guest = 0;
 
        if (!cpu_has_vmx_flexpriority())
                flexpriority_enabled = 0;
        unsigned long flags;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
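+       /*
+        * With unrestricted guest support the CPU runs real-mode guest code
+        * directly, so the vm86-based real-mode emulation below is not needed.
+        */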
+       if (enable_unrestricted_guest)
+               return;
+
        vmx->emulation_required = 1;
        vcpu->arch.rmode.vm86_active = 1;
 
                              CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
                vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               *hw_cr0 |= X86_CR0_PE | X86_CR0_PG;
                *hw_cr0 &= ~X86_CR0_WP;
        } else if (!is_paging(vcpu)) {
                /* From nonpaging to paging */
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-       unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) |
-                               KVM_VM_CR0_ALWAYS_ON;
+       unsigned long hw_cr0;
+
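+       /*
+        * Unrestricted guests keep control of CR0.PG and CR0.PE; otherwise
+        * both bits are forced on and real/unpaged mode is emulated.
+        */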
+       if (enable_unrestricted_guest)
+               hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
+                       | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+       else
+               hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
 
        vmx_fpu_deactivate(vcpu);
 
                ar = 0xf3;
        } else
                ar = vmx_segment_access_rights(var);
+
+       /*
+        * Fix the "Accessed" bit in the AR field of segment registers for
+        * older qemu binaries.
+        *
+        * The IA-32 architecture specifies that at processor reset the
+        * "Accessed" bit in the AR field of segment registers is 1, while
+        * qemu clears it in its userland cpu_reset code. This causes an
+        * invalid-guest-state vmexit when "unrestricted guest" mode is
+        * turned on.
+        *
+        * A fix for this setup issue is being pushed into the qemu tree;
+        * newer qemu binaries with that fix will not need this kvm hack.
+        */
+       if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
+               ar |= 0x1; /* Accessed */
+
        vmcs_write32(sf->ar_bytes, ar);
 }
 
 static void seg_setup(int seg)
 {
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+       unsigned int ar;
 
        vmcs_write16(sf->selector, 0);
        vmcs_writel(sf->base, 0);
        vmcs_write32(sf->limit, 0xffff);
-       vmcs_write32(sf->ar_bytes, 0xf3);
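+       /*
+        * 0x93 = present, DPL 0, accessed read/write data segment; CS also
+        * gets type bit 0x08, making it an accessed code segment. These are
+        * the architectural real-mode reset attributes. Without unrestricted
+        * guest, keep the vm86-style 0xf3 attributes.
+        */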
+       if (enable_unrestricted_guest) {
+               ar = 0x93;
+               if (seg == VCPU_SREG_CS)
+                       ar |= 0x08; /* code segment */
+       } else
+               ar = 0xf3;
+
+       vmcs_write32(sf->ar_bytes, ar);
 }
 
 static int alloc_apic_access_page(struct kvm *kvm)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
                if (!enable_ept)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+               if (!enable_unrestricted_guest)
+                       exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
        }