static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                                         struct kvm_xsave *guest_xsave)
 {
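+       /* Guests with protected state have no guest_fpu; nothing to report. */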
+       if (!vcpu->arch.guest_fpu)
+               return;
+
        if (boot_cpu_has(X86_FEATURE_XSAVE)) {
                memset(guest_xsave, 0, sizeof(struct kvm_xsave));
                fill_xsave((u8 *) guest_xsave->region, vcpu);
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
                                        struct kvm_xsave *guest_xsave)
 {
-       u64 xstate_bv =
-               *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
-       u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
+       u64 xstate_bv;
+       u32 mxcsr;
+
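+       /* Guests with protected state have no guest_fpu; nothing to set. */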
+       if (!vcpu->arch.guest_fpu)
+               return 0;
+
+       xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+       mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
 
        if (boot_cpu_has(X86_FEATURE_XSAVE)) {
                /*
 
        kvm_save_current_fpu(vcpu->arch.user_fpu);
 
-       /* PKRU is separately restored in kvm_x86_ops.run.  */
-       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
-                               ~XFEATURE_MASK_PKRU);
+       /*
+        * Guests with protected state can't have it set by the hypervisor,
+        * so skip trying to set it.
+        */
+       if (vcpu->arch.guest_fpu)
+               /* PKRU is separately restored in kvm_x86_ops.run. */
+               __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
+                                       ~XFEATURE_MASK_PKRU);
 
        fpregs_mark_activate();
        fpregs_unlock();
 {
        fpregs_lock();
 
-       kvm_save_current_fpu(vcpu->arch.guest_fpu);
+       /*
+        * Guests with protected state can't have it read by the hypervisor,
+        * so skip trying to save it.
+        */
+       if (vcpu->arch.guest_fpu)
+               kvm_save_current_fpu(vcpu->arch.guest_fpu);
 
        copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
 
 {
        struct fxregs_state *fxsave;
 
+       if (!vcpu->arch.guest_fpu)
+               return 0;
+
        vcpu_load(vcpu);
 
        fxsave = &vcpu->arch.guest_fpu->state.fxsave;
 {
        struct fxregs_state *fxsave;
 
+       if (!vcpu->arch.guest_fpu)
+               return 0;
+
        vcpu_load(vcpu);
 
        fxsave = &vcpu->arch.guest_fpu->state.fxsave;
 
 static void fx_init(struct kvm_vcpu *vcpu)
 {
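+       /* Guests with protected state have no guest_fpu to initialize. */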
+       if (!vcpu->arch.guest_fpu)
+               return;
+
        fpstate_init(&vcpu->arch.guest_fpu->state);
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv =
        vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
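+/*
+ * Free the vCPU's guest FPU state, if allocated, and clear the pointer so
+ * that later users see it as absent.
+ */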
+void kvm_free_guest_fpu(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.guest_fpu) {
+               kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
+               vcpu->arch.guest_fpu = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(kvm_free_guest_fpu);
+
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
        if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
        return 0;
 
 free_guest_fpu:
-       kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
+       kvm_free_guest_fpu(vcpu);
 free_user_fpu:
        kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
 free_emulate_ctxt:
        kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
        free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
        kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-       kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
+       kvm_free_guest_fpu(vcpu);
 
        kvm_hv_vcpu_uninit(vcpu);
        kvm_pmu_destroy(vcpu);
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
 
-       if (kvm_mpx_supported()) {
+       if (vcpu->arch.guest_fpu && kvm_mpx_supported()) {
                void *mpx_state_buffer;
 
                /*