  * @vcpu: virtual cpu
  * @gva: guest virtual address
  * @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
  * @write: indicates if access is a write access
  *
  * Translate a guest virtual address into a guest absolute address by means
  * of dynamic address translation as specified by the architecture.
  */
 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
-                                    unsigned long *gpa, int write)
+                                    unsigned long *gpa, const union asce asce,
+                                    int write)
 {
        union vaddress vaddr = {.addr = gva};
        union raddress raddr = {.addr = gva};
        union ctlreg0 ctlreg0;
        unsigned long ptr;
        int edat1, edat2;
-       union asce asce;
 
        ctlreg0.val = vcpu->arch.sie_block->gcr[0];
        edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
        edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
-       asce.val = get_vcpu_asce(vcpu);
        if (asce.r)
                goto real_address;
        ptr = asce.origin * 4096;
 [...]

 static int is_low_address(unsigned long ga)
 {
        /* Check for address ranges 0..511 and 4096..4607 */
        return (ga & ~0x11fful) == 0;
 }
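For reference (not part of the patch): guest_translate now reads the asce fields directly, so the shape of union asce matters here. Below is a sketch of the union as defined earlier in gaccess.c, following the z/Architecture ASCE layout; treat the field details as an approximation:

union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* region/segment-table origin */
		unsigned long	     : 2;
		unsigned long g      : 1;  /* subspace-group control */
		unsigned long p      : 1;  /* private-space control */
		unsigned long s      : 1;  /* storage-alteration-event control */
		unsigned long x      : 1;  /* space-switch-event control */
		unsigned long r      : 1;  /* real-space control */
		unsigned long	     : 1;
		unsigned long dt     : 2;  /* designation-type control */
		unsigned long tl     : 2;  /* region/segment-table length */
	};
};

With asce.r set, dynamic address translation is bypassed (the real_address path above), and asce.origin * 4096 yields the byte address of the top-level translation table.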
 
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+                                         const union asce asce)
 {
        union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
-       union asce asce;
 
        if (!ctlreg0.lap)
                return 0;
-       asce.val = get_vcpu_asce(vcpu);
        if (psw_bits(*psw).t && asce.p)
                return 0;
        return 1;
 }

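The callers below still obtain the value they now pass down via get_vcpu_asce(). For orientation, that helper selects the asce from the guest control registers according to the PSW address-space control; a sketch under the assumption that it matches the 2014-era gaccess.c (the PSW_AS_* constant names are my assumption):

static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
{
	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
	case PSW_AS_PRIMARY:
		return vcpu->arch.sie_block->gcr[1];	/* primary space */
	case PSW_AS_SECONDARY:
		return vcpu->arch.sie_block->gcr[7];	/* secondary space */
	case PSW_AS_HOME:
		return vcpu->arch.sie_block->gcr[13];	/* home space */
	}
	return 0;
}

Resolving the asce once in the caller and handing it down means every page of a multi-page access is translated by the same asce, instead of re-reading the control registers at each step.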
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
                            unsigned long *pages, unsigned long nr_pages,
-                           int write)
+                           const union asce asce, int write)
 {
        struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        struct trans_exc_code_bits *tec_bits;
        int lap_enabled, rc;

        tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
        tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
        tec_bits->as = psw_bits(*psw).as;
-       lap_enabled = low_address_protection_enabled(vcpu);
+       lap_enabled = low_address_protection_enabled(vcpu, asce);
        while (nr_pages) {
                ga = kvm_s390_logical_to_effective(vcpu, ga);
                tec_bits->addr = ga >> PAGE_SHIFT;
                if (write && lap_enabled && is_low_address(ga)) {
                        pgm->code = PGM_PROTECTION;
                        return pgm->code;
                }
                ga &= PAGE_MASK;
                if (psw_bits(*psw).t) {
-                       rc = guest_translate(vcpu, ga, pages, write);
+                       rc = guest_translate(vcpu, ga, pages, asce, write);
                        if (rc < 0)
                                return rc;
                        if (rc == PGM_PROTECTION)
                                tec_bits->b61 = 1;

 [...]
        need_ipte_lock = psw_bits(*psw).t && !asce.r;
        if (need_ipte_lock)
                ipte_lock(vcpu);
-       rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+       rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
        for (idx = 0; idx < nr_pages && !rc; idx++) {
                gpa = *(pages + idx) + (ga & ~PAGE_MASK);
                _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
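A standalone illustration (hypothetical values, not from the patch) of the split arithmetic in this access_guest loop: the guest-physical address re-adds the in-page offset, and each chunk is capped at the next page boundary.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* made-up page frames and a copy that straddles a page boundary */
	unsigned long pages[] = { 0x80002000UL, 0x80005000UL };
	unsigned long ga = 0x2000f00UL, len = 0x300UL, gpa, _len;
	unsigned long idx = 0;

	while (len) {
		/* same arithmetic as the loop above */
		gpa = pages[idx++] + (ga & ~PAGE_MASK);
		_len = PAGE_SIZE - (gpa & ~PAGE_MASK);
		if (_len > len)
			_len = len;
		printf("copy %#lx bytes at gpa %#lx\n", _len, gpa);
		ga += _len;
		len -= _len;
	}
	return 0;
}

This prints a 0x100-byte chunk at 0x80002f00 followed by a 0x200-byte chunk at 0x80005000.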
 [...]

        tec->as = psw_bits(*psw).as;
        tec->fsi = write ? FSI_STORE : FSI_FETCH;
        tec->addr = gva >> PAGE_SHIFT;
-       if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+       asce.val = get_vcpu_asce(vcpu);
+       if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
                if (write) {
                        rc = pgm->code = PGM_PROTECTION;
                        return rc;
                }
        }
 
-       asce.val = get_vcpu_asce(vcpu);
        if (psw_bits(*psw).t && !asce.r) {      /* Use DAT? */
-               rc = guest_translate(vcpu, gva, gpa, write);
+               rc = guest_translate(vcpu, gva, gpa, asce, write);
                if (rc > 0) {
                        if (rc == PGM_PROTECTION)
                                tec->b61 = 1;
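For reference, the translation-exception code filled in throughout these functions (tec/tec_bits) decodes roughly as below; a sketch of the struct defined at the top of gaccess.c, with the field semantics as I understand them:

struct trans_exc_code_bits {
	unsigned long addr : 52; /* translation-exception address */
	unsigned long fsi  : 2;  /* fetch/store indication */
	unsigned long	   : 7;
	unsigned long b61  : 1;  /* distinguishes DAT protection from
				  * low-address protection */
	unsigned long as   : 2;  /* address-space indication */
};

That is why the paths above set tec->b61 = 1 only when PGM_PROTECTION came out of guest_translate, and leave it clear when low-address protection rejected the access.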