struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
        hr->dpdes = vc->dpdes;
-       hr->hfscr = vcpu->arch.hfscr;
        hr->purr = vcpu->arch.purr;
        hr->spurr = vcpu->arch.spurr;
        hr->ic = vcpu->arch.ic;
        case BOOK3S_INTERRUPT_H_INST_STORAGE:
                hr->asdr = vcpu->arch.fault_gpa;
                break;
+       case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+               hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
+                            (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
+               break;
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                hr->heir = vcpu->arch.emul_inst;
                break;
        }
 }
 
-/*
- * This can result in some L0 HV register state being leaked to an L1
- * hypervisor when the hv_guest_state is copied back to the guest after
- * being modified here.
- *
- * There is no known problem with such a leak, and in many cases these
- * register settings could be derived by the guest by observing behaviour
- * and timing, interrupts, etc., but it is an issue to consider.
- */
-static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
-{
-       struct kvmppc_vcore *vc = vcpu->arch.vcore;
-       u64 mask;
-
-       /*
-        * Don't let L1 change LPCR bits for the L2 except these:
-        */
-       mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
-               LPCR_LPES | LPCR_MER;
-
-       /*
-        * Additional filtering is required depending on hardware
-        * and configuration.
-        */
-       hr->lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
-                       (vc->lpcr & ~mask) | (hr->lpcr & mask));
-
-       /*
-        * Don't let L1 enable features for L2 which we've disabled for L1,
-        * but preserve the interrupt cause field.
-        */
-       hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
-
-       /* Don't let data address watchpoint match in hypervisor state */
-       hr->dawrx0 &= ~DAWRX_HYP;
-       hr->dawrx1 &= ~DAWRX_HYP;
-
-       /* Don't let completed instruction address breakpt match in HV state */
-       if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
-               hr->ciabr &= ~CIABR_PRIV;
-}
-
-static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
                                     sizeof(struct pt_regs));
 }
 
+/*
+ * Load the L2 guest HV register state supplied by L1 (l2_hv) into the
+ * vcpu for a nested entry, sanitising it on the way in:
+ *  - LPCR: L1 may only change the bits in 'mask'; everything else comes
+ *    from the vcore value, then kvmppc_filter_lpcr_hv() applies further
+ *    hardware/configuration-dependent filtering.
+ *  - HFSCR: L1 may not enable facilities that are disabled for L1 itself
+ *    (the interrupt-cause field is preserved).
+ *  - DAWRX0/1 and CIABR: may not match in hypervisor state.
+ *
+ * @lpcr is in/out: the caller passes L1's requested LPCR and receives
+ * the filtered value to use for the nested entry.
+ *
+ * NOTE(review): l1_hv is not referenced in this body — confirm whether
+ * it is reserved for a follow-up use or can be dropped.
+ */
+static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
+                           const struct hv_guest_state *l2_hv,
+                           const struct hv_guest_state *l1_hv, u64 *lpcr)
+{
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       u64 mask;
+
+       /* Bulk-load l2_hv first; the filtered fields are overridden below. */
+       restore_hv_regs(vcpu, l2_hv);
+
+       /*
+        * Don't let L1 change LPCR bits for the L2 except these:
+        */
+       mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
+               LPCR_LPES | LPCR_MER;
+
+       /*
+        * Additional filtering is required depending on hardware
+        * and configuration.
+        */
+       *lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
+                                     (vc->lpcr & ~mask) | (*lpcr & mask));
+
+       /*
+        * Don't let L1 enable features for L2 which we've disabled for L1,
+        * but preserve the interrupt cause field.
+        */
+       vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
+
+       /* Don't let data address watchpoint match in hypervisor state */
+       vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
+       vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;
+
+       /* Don't let completed instruction address breakpt match in HV state */
+       if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+               vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
+}
+
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 {
        long int err, r;
        struct hv_guest_state l2_hv = {0}, saved_l1_hv;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        u64 hv_ptr, regs_ptr;
-       u64 hdec_exp;
+       u64 hdec_exp, lpcr;
        s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
 
        if (vcpu->kvm->arch.l1_ptcr == 0)
        /* Guest must always run with ME enabled, HV disabled. */
        vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;
 
-       sanitise_hv_regs(vcpu, &l2_hv);
-       restore_hv_regs(vcpu, &l2_hv);
+       lpcr = l2_hv.lpcr;
+       load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);
 
        vcpu->arch.ret = RESUME_GUEST;
        vcpu->arch.trap = 0;
                        r = RESUME_HOST;
                        break;
                }
-               r = kvmhv_run_single_vcpu(vcpu, hdec_exp, l2_hv.lpcr);
+               r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
        } while (is_kvmppc_resume_guest(r));
 
        /* save L2 state for return */