}
 EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
 
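+/*
+ * fpu_copy_fpstate_to_kvm_uabi - Copy guest FPU state into the KVM uABI buffer
+ * @fpu:	Pointer to the guest FPU container
+ * @buf:	Destination buffer (KVM_GET_XSAVE uABI layout)
+ * @size:	Size of the destination buffer
+ * @pkru:	Guest PKRU value to place in the PKRU xfeature slot
+ *
+ * On hosts with XSAVE the state is written in the non-compacted uABI
+ * format. Otherwise only the legacy FXSAVE image is copied and the
+ * header is set up so the buffer stays restorable on an XSAVE host.
+ */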
+void fpu_copy_fpstate_to_kvm_uabi(struct fpu *fpu, void *buf,
+                              unsigned int size, u32 pkru)
+{
+       union fpregs_state *kstate = &fpu->state;
+       union fpregs_state *ustate = buf;
+       struct membuf mb = { .p = buf, .left = size };
+
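+       /*
+        * With XSAVE, emit the full xstate in the uABI (non-compacted)
+        * layout, passing the caller-supplied PKRU value along.
+        */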
+       if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+               __copy_xstate_to_uabi_buf(mb, &kstate->xsave, pkru,
+                                         XSTATE_COPY_XSAVE);
+       } else {
+               memcpy(&ustate->fxsave, &kstate->fxsave, sizeof(ustate->fxsave));
+               /* Make it restorable on an XSAVE-enabled host */
+               ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
+       }
+}
+EXPORT_SYMBOL_GPL(fpu_copy_fpstate_to_kvm_uabi);
+
 int fpu_copy_kvm_uabi_to_fpstate(struct fpu *fpu, const void *buf, u64 xcr0,
                                 u32 *vpkru)
 {
 
        return 0;
 }
 
-static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
-{
-       struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
-       u64 xstate_bv = xsave->header.xfeatures;
-       u64 valid;
-
-       /*
-        * Copy legacy XSAVE area, to avoid complications with CPUID
-        * leaves 0 and 1 in the loop below.
-        */
-       memcpy(dest, xsave, XSAVE_HDR_OFFSET);
-
-       /* Set XSTATE_BV */
-       xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
-       *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
-
-       /*
-        * Copy each region from the possibly compacted offset to the
-        * non-compacted offset.
-        */
-       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
-       while (valid) {
-               u32 size, offset, ecx, edx;
-               u64 xfeature_mask = valid & -valid;
-               int xfeature_nr = fls64(xfeature_mask) - 1;
-               void *src;
-
-               cpuid_count(XSTATE_CPUID, xfeature_nr,
-                           &size, &offset, &ecx, &edx);
-
-               if (xfeature_nr == XFEATURE_PKRU) {
-                       memcpy(dest + offset, &vcpu->arch.pkru,
-                              sizeof(vcpu->arch.pkru));
-               } else {
-                       src = get_xsave_addr(xsave, xfeature_nr);
-                       if (src)
-                               memcpy(dest + offset, src, size);
-               }
-
-               valid -= xfeature_mask;
-       }
-}
-
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                                         struct kvm_xsave *guest_xsave)
 {
        if (!vcpu->arch.guest_fpu)
                return;
 
-       if (boot_cpu_has(X86_FEATURE_XSAVE)) {
-               memset(guest_xsave, 0, sizeof(struct kvm_xsave));
-               fill_xsave((u8 *) guest_xsave->region, vcpu);
-       } else {
-               memcpy(guest_xsave->region,
-                       &vcpu->arch.guest_fpu->state.fxsave,
-                       sizeof(struct fxregs_state));
-               *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
-                       XFEATURE_MASK_FPSSE;
-       }
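+       /*
+        * Defer to the FPU core helper, which handles both the XSAVE and
+        * the legacy FXSAVE case and fills in the guest's PKRU value.
+        */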
+       fpu_copy_fpstate_to_kvm_uabi(vcpu->arch.guest_fpu, guest_xsave->region,
+                                    sizeof(guest_xsave->region),
+                                    vcpu->arch.pkru);
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,