#include <asm/processor.h>
 #include <asm/user.h>
 #include <asm/fpu/xstate.h>
+#include <asm/sgx.h>
 #include "cpuid.h"
 #include "lapic.h"
 #include "mmu.h"
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
 
+       /*
+        * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
+        * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
+        * requested XCR0 value.  The enclave's XFRM must be a subset of XCR0
+        * at the time of EENTER, thus adjust the allowed XFRM by the guest's
+        * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
+        * '1' even on CPUs that don't support XSAVE.
+        */
+       best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
+       if (best) {
+               best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
+               best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
+               best->ecx |= XFEATURE_MASK_FPSSE;
+       }
+
        kvm_update_pv_runtime(vcpu);
 
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        );
 
        kvm_cpu_cap_mask(CPUID_7_0_EBX,
-               F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
+               F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
                F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
-               F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
+               F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
+               F(SGX_LC)
        );
        /* Set LA57 based on hardware capability. */
        if (cpuid_ecx(7) & F(LA57))
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
        );
 
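+       /*
+        * SGX1 and SGX2 are scattered Linux feature bits; KVM collects them
+        * into a KVM-only CPUID_12_EAX leaf that shadows CPUID.0x12.0x0.EAX.
+        */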
+       kvm_cpu_cap_init_scattered(CPUID_12_EAX,
+               SF(SGX1) | SF(SGX2)
+       );
+
        kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                        entry->edx = 0;
                }
                break;
+       case 0x12:
+               /* Intel SGX */
+               if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
+                       entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+                       break;
+               }
+
+               /*
+                * Index 0: Sub-features, MISCSELECT (a.k.a. extended features)
+                * and max enclave sizes.  The SGX sub-features and MISCSELECT
+                * are restricted by kernel and KVM capabilities (like most
+                * feature flags), while enclave size is unrestricted.
+                */
+               cpuid_entry_override(entry, CPUID_12_EAX);
+               entry->ebx &= SGX_MISC_EXINFO;
+
+               entry = do_host_cpuid(array, function, 1);
+               if (!entry)
+                       goto out;
+
+               /*
+                * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
+                * feature flags.  Advertise all supported flags, including
+                * privileged attributes that require explicit opt-in from
+                * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
+                * expected to derive it from supported XCR0.
+                */
+               entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
+                             /* PROVISIONKEY | */ SGX_ATTR_EINITTOKENKEY |
+                             SGX_ATTR_KSS;
+               entry->ebx &= 0;
+               break;
        /* Intel PT */
        case 0x14:
                if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
 
 #include "mmu.h"
 #include "nested.h"
 #include "pmu.h"
+#include "sgx.h"
 #include "trace.h"
 #include "vmx.h"
 #include "x86.h"
                if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
                    exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
 
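+               /*
+                * Write the vmcs02 ENCLS-exiting bitmap, which combines KVM's
+                * own intercepts with the leaves L1 wants to intercept (see
+                * vmx_write_encls_bitmap()).
+                */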
+               if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
+                       vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
+
                secondary_exec_controls_set(vmx, exec_control);
        }
 
        return false;
 }
 
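+/*
+ * Return true if an ENCLS exit from L2 should be reflected to L1, i.e. if SGX
+ * is exposed to L1, L1 enabled ENCLS-exiting, and the bit for the leaf in RAX
+ * is set in L1's bitmap.  Leaves greater than 62 all map to bit 63.
+ */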
+static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
+                                         struct vmcs12 *vmcs12)
+{
+       u32 encls_leaf;
+
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
+           !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
+               return false;
+
+       encls_leaf = kvm_rax_read(vcpu);
+       if (encls_leaf > 62)
+               encls_leaf = 63;
+       return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
+}
+
 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
        struct vmcs12 *vmcs12, gpa_t bitmap)
 {
        case EXIT_REASON_VMFUNC:
                /* VM functions are emulated through L2->L0 vmexits. */
                return true;
-       case EXIT_REASON_ENCLS:
-               /* SGX is never exposed to L1 */
-               return true;
        default:
                break;
        }
        case EXIT_REASON_TPAUSE:
                return nested_cpu_has2(vmcs12,
                        SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
+       case EXIT_REASON_ENCLS:
+               return nested_vmx_exit_handled_encls(vcpu, vmcs12);
        default:
                return true;
        }
                msrs->secondary_ctls_high |=
                        SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 
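+       /* Advertise ENCLS-exiting to L1 only if SGX virtualization is enabled. */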
+       if (enable_sgx)
+               msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
+
        /* miscellaneous data */
        rdmsr(MSR_IA32_VMX_MISC,
                msrs->misc_low,
 
                PIN_BASED_EXT_INTR_MASK;
 }
 
+static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
+{
+       return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
+}
+
 /*
  * if fixed0[i] == 1: val[i] must be 1
  * if fixed1[i] == 0: val[i] must be 0
 
 
 #include "cpuid.h"
 #include "kvm_cache_regs.h"
+#include "nested.h"
 #include "sgx.h"
 #include "vmx.h"
 #include "x86.h"
 
-bool __read_mostly enable_sgx;
+bool __read_mostly enable_sgx = true;
+module_param_named(sgx, enable_sgx, bool, 0444);
 
 /* Initial value of guest's virtual SGX_LEPUBKEYHASHn MSRs */
 static u64 sgx_pubkey_hash[4] __ro_after_init;
        memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash,
               sizeof(sgx_pubkey_hash));
 }
+
+/*
+ * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM
+ * restrictions if the guest's allowed-1 settings diverge from hardware.
+ */
+static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *guest_cpuid;
+       u32 eax, ebx, ecx, edx;
+
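+       /*
+        * Intercept ECREATE if userspace hasn't opted in to provisioning, as
+        * KVM must then prevent the guest from setting the PROVISIONKEY
+        * attribute in the SECS.
+        */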
+       if (!vcpu->kvm->arch.sgx_provisioning_allowed)
+               return true;
+
+       guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0);
+       if (!guest_cpuid)
+               return true;
+
+       cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx);
+       if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx)
+               return true;
+
+       guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1);
+       if (!guest_cpuid)
+               return true;
+
+       cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx);
+       if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx ||
+           guest_cpuid->ecx != ecx || guest_cpuid->edx != edx)
+               return true;
+
+       return false;
+}
+
+void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+       /*
+        * There is no software enable bit for SGX that is virtualized by
+        * hardware, e.g. there's no CR4.SGXE, so when SGX is disabled in the
+        * guest (either by the host or by the guest's BIOS) but enabled in the
+        * host, trap all ENCLS leaves and inject #UD/#GP as needed to emulate
+        * the expected system behavior for ENCLS.
+        */
+       u64 bitmap = -1ull;
+
+       /* Nothing to do if hardware doesn't support SGX */
+       if (!cpu_has_vmx_encls_vmexit())
+               return;
+
+       if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) &&
+           sgx_enabled_in_guest_bios(vcpu)) {
+               if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
+                       bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
+                       if (sgx_intercept_encls_ecreate(vcpu))
+                               bitmap |= (1 << ECREATE);
+               }
+
+               if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2))
+                       bitmap &= ~GENMASK_ULL(EMODT, EAUG);
+
+               /*
+                * Trap and execute EINIT if launch control is enabled in the
+                * host using the guest's values for launch control MSRs, even
+                * if the guest's values are fixed to hardware default values.
+                * The MSRs are not loaded/saved on VM-Enter/VM-Exit as writing
+                * the MSRs is extraordinarily expensive.
+                */
+               if (boot_cpu_has(X86_FEATURE_SGX_LC))
+                       bitmap |= (1 << EINIT);
+
+               if (!vmcs12 && is_guest_mode(vcpu))
+                       vmcs12 = get_vmcs12(vcpu);
+               if (vmcs12 && nested_cpu_has_encls_exit(vmcs12))
+                       bitmap |= vmcs12->encls_exiting_bitmap;
+       }
+       vmcs_write64(ENCLS_EXITING_BITMAP, bitmap);
+}
 
 
 #include <linux/kvm_host.h>
 
+#include "capabilities.h"
+#include "vmx_ops.h"
+
 #ifdef CONFIG_X86_SGX_KVM
 extern bool __read_mostly enable_sgx;
 
 
 void setup_default_sgx_lepubkeyhash(void);
 void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu);
+
+void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12);
 #else
 #define enable_sgx 0
 
 static inline void setup_default_sgx_lepubkeyhash(void) { }
 static inline void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) { }
+
+static inline void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu,
+                                         struct vmcs12 *vmcs12)
+{
+       /* Nothing to do if hardware doesn't support SGX */
+       if (cpu_has_vmx_encls_vmexit())
+               vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+}
 #endif
 
 #endif /* __KVM_X86_SGX_H */
 
        FIELD64(VMREAD_BITMAP, vmread_bitmap),
        FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
        FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
+       FIELD64(ENCLS_EXITING_BITMAP, encls_exiting_bitmap),
        FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
        FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
        FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
 
        u64 vm_function_control;
        u64 eptp_list_address;
        u64 pml_address;
-       u64 padding64[3]; /* room for future expansion */
+       u64 encls_exiting_bitmap;
+       u64 padding64[2]; /* room for future expansion */
        /*
         * To allow migration of L1 (complete with its L2 guests) between
         * machines of different natural widths (32 or 64 bit), we cannot have
        CHECK_OFFSET(vm_function_control, 296);
        CHECK_OFFSET(eptp_list_address, 304);
        CHECK_OFFSET(pml_address, 312);
+       CHECK_OFFSET(encls_exiting_bitmap, 320);
        CHECK_OFFSET(cr0_guest_host_mask, 344);
        CHECK_OFFSET(cr4_guest_host_mask, 352);
        CHECK_OFFSET(cr0_read_shadow, 360);
 
                vmx->msr_ia32_feature_control = data;
                if (msr_info->host_initiated && data == 0)
                        vmx_leave_nested(vcpu);
+
+               /* SGX may be enabled/disabled by guest's firmware */
+               vmx_write_encls_bitmap(vcpu, NULL);
                break;
        case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
                /*
                vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
        }
 
-       if (cpu_has_vmx_encls_vmexit())
-               vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+       vmx_write_encls_bitmap(&vmx->vcpu, NULL);
 
        if (vmx_pt_mode_is_host_guest()) {
                memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
 
        set_cr4_guest_host_mask(vmx);
 
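+       /*
+        * The ENCLS-exiting bitmap and the writable FEAT_CTL bits depend on
+        * guest CPUID and must be refreshed when CPUID changes.
+        */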
+       vmx_write_encls_bitmap(vcpu, NULL);
+       if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
+               vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
+       else
+               vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
+
+       if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
+               vmx->msr_ia32_feature_control_valid_bits |=
+                       FEAT_CTL_SGX_LC_ENABLED;
+       else
+               vmx->msr_ia32_feature_control_valid_bits &=
+                       ~FEAT_CTL_SGX_LC_ENABLED;
+
        /* Refresh #PF interception to account for MAXPHYADDR changes. */
        vmx_update_exception_bitmap(vcpu);
 }
        if (vmx_pt_mode_is_host_guest())
                kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
 
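+       /* Hide all SGX features if SGX virtualization is disabled. */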
+       if (!enable_sgx) {
+               kvm_cpu_cap_clear(X86_FEATURE_SGX);
+               kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
+               kvm_cpu_cap_clear(X86_FEATURE_SGX1);
+               kvm_cpu_cap_clear(X86_FEATURE_SGX2);
+       }
+
        if (vmx_umip_emulated())
                kvm_cpu_cap_set(X86_FEATURE_UMIP);