*/
                __cpu_init_stage2();
        } else {
-               if (__hyp_get_vectors() == hyp_default_vectors)
-                       cpu_init_hyp_mode(NULL);
+               cpu_init_hyp_mode(NULL);
        }
 +
 +      if (vgic_present)
 +              kvm_vgic_init_cpu_hardware();
  }
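For context, the kvm_vgic_init_cpu_hardware() call added above is what ends up clearing stale GICv2 list registers through the vgic_v2_init_lrs() helper introduced further down in this series. A rough, from-memory sketch of that hook (not quoted from the patch itself):

void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * Make sure the list registers start out clear so that later code
	 * only has to program the registers it actually uses.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_init_lrs();
	else
		kvm_call_hyp(__vgic_v3_init_lrs);
}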
  
- static void cpu_hyp_reset(void)
- {
-       if (!is_kernel_in_hyp_mode())
-               __cpu_reset_hyp_mode(hyp_default_vectors,
-                                    kvm_get_idmap_start());
- }
- 
  static void _kvm_arch_hardware_enable(void *discard)
  {
        if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
 
  
        empty_zero_page = virt_to_page(zero_page);
        __flush_dcache_page(NULL, empty_zero_page);
+ 
+       /* Compute the virt/idmap offset, mostly for the sake of KVM */
+       kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
  }
 +
 +void __init early_mm_init(const struct machine_desc *mdesc)
 +{
 +      build_mem_type_table();
 +      early_paging_init(mdesc);
 +}
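kimage_voffset records the difference between a kernel image address and its identity-mapped alias, so HYP init code can translate image addresses with a simple subtraction instead of a table walk. A minimal illustration of the arithmetic, using a hypothetical helper name that is not part of the patch:

/* Illustration only: with kimage_voffset = va - idmap(va) for the kernel
 * image, any image address converts back to its idmap alias like this. */
static inline unsigned long image_addr_to_idmap(unsigned long va)
{
	return va - kimage_voffset;
}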
 
                              kvm_s390_available_subfunc.pcc);
        }
        if (test_facility(57)) /* MSA5 */
 -              __cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
 +              __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.ppno);
  
+       if (test_facility(146)) /* MSA8 */
+               __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kma);
+ 
        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
        /*
 
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
        }
-       cr4_set_bits(X86_CR4_VMXE);
- 
-       if (vmm_exclusive) {
-               kvm_cpu_vmxon(phys_addr);
-               ept_sync_global();
-       }
+       kvm_cpu_vmxon(phys_addr);
+       ept_sync_global();
  
 -      native_store_gdt(this_cpu_ptr(&host_gdt));
 -
        return 0;
  }
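The dropped cr4_set_bits(X86_CR4_VMXE) is not lost; with vmm_exclusive gone it is expected to live inside kvm_cpu_vmxon() itself, so CR4.VMXE is always set immediately before VMXON. A simplified, from-memory sketch of that helper (the real code uses the ASM_VMX_VMXON_RAX byte sequence rather than the plain mnemonic):

static void kvm_cpu_vmxon(u64 vmxon_region_pa)
{
	/* VMXON raises #UD unless CR4.VMXE is already set */
	cr4_set_bits(X86_CR4_VMXE);
	intel_pt_handle_vmx(1);

	asm volatile ("vmxon %0" : : "m"(vmxon_region_pa) : "memory", "cc");
}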
  
  
        }
  
 +      if (enable_pml) {
 +              /*
 +               * Conceptually we want to copy the PML address and index from
 +               * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
 +               * since we always flush the log on each vmexit, this happens
 +               * to be equivalent to simply resetting the fields in vmcs02.
 +               */
 +              ASSERT(vmx->pml_pg);
 +              vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
 +              vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 +      }
 +
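The comment above relies on the PML buffer being drained and its index rewound on every VM-exit, which is why vmcs02 can simply start from an empty log. A condensed, from-memory sketch of that exit-side flush (not quoted from this patch):

static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 *pml_buf;
	u16 pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* An index still at the top means nothing was logged */
	if (pml_idx == (PML_ENTITY_NUM - 1))
		return;

	/* The index points at the next free slot and counts down */
	pml_idx = (pml_idx >= PML_ENTITY_NUM) ? 0 : pml_idx + 1;

	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++)
		kvm_vcpu_mark_page_dirty(vcpu, pml_buf[pml_idx] >> PAGE_SHIFT);

	/* Rewind the index, leaving the whole buffer free again */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}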
        if (nested_cpu_has_ept(vmcs12)) {
-               kvm_mmu_unload(vcpu);
-               nested_ept_init_mmu_context(vcpu);
+               if (nested_ept_init_mmu_context(vcpu)) {
+                       *entry_failure_code = ENTRY_FAIL_DEFAULT;
+                       return 1;
+               }
        } else if (nested_cpu_has2(vmcs12,
                                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                vmx_flush_tlb_ept_only(vcpu);
 
  
  #include "vgic.h"
  
- /*
-  * Call this function to convert a u64 value to an unsigned long * bitmask
-  * in a way that works on both 32-bit and 64-bit LE and BE platforms.
-  *
-  * Warning: Calling this function may modify *val.
-  */
- static unsigned long *u64_to_bitmask(u64 *val)
- {
- #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
-       *val = (*val >> 32) | (*val << 32);
- #endif
-       return (unsigned long *)val;
- }
- 
 +static inline void vgic_v2_write_lr(int lr, u32 val)
 +{
 +      void __iomem *base = kvm_vgic_global_state.vctrl_base;
 +
 +      writel_relaxed(val, base + GICH_LR0 + (lr * 4));
 +}
 +
 +void vgic_v2_init_lrs(void)
 +{
 +      int i;
 +
 +      for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
 +              vgic_v2_write_lr(i, 0);
 +}
 +
- void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
  {
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
  
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
 -      vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
 -              GICH_VMCR_PRIMASK_MASK;
 +      vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
 +               GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
  
-       vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+       cpu_if->vgic_vmcr = vmcr;
  }
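The new PMR handling keeps vgic_vmcr.pmr in the 8-bit GICV_PMR format, while the VMPriMask field of GICH_VMCR is only 5 bits wide, hence the extra shift by GICV_PMR_PRIORITY_SHIFT before packing. A small standalone demo of the arithmetic, with the field layout restated from memory of the GIC headers:

#include <stdio.h>
#include <stdint.h>

/* VMPriMask occupies bits 31:27 of GICH_VMCR; GICV_PMR carries an 8-bit
 * priority whose low 3 bits have no counterpart in the 5-bit field. */
#define GICH_VMCR_PRIMASK_SHIFT	27
#define GICH_VMCR_PRIMASK_MASK	(0x1fU << GICH_VMCR_PRIMASK_SHIFT)
#define GICV_PMR_PRIORITY_SHIFT	3

static uint32_t pack_primask(uint32_t pmr)
{
	return ((pmr >> GICV_PMR_PRIORITY_SHIFT) << GICH_VMCR_PRIMASK_SHIFT) &
	       GICH_VMCR_PRIMASK_MASK;
}

int main(void)
{
	/* An 8-bit PMR of 0x80 becomes 0x10 in the 5-bit field, i.e.
	 * 0x80000000 once placed at bits 31:27 of GICH_VMCR. */
	printf("0x%08x\n", pack_primask(0x80));
	return 0;
}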
  
  void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
  int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type);
  
 +void vgic_v2_init_lrs(void);
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
  
  static inline void vgic_get_irq_kref(struct vgic_irq *irq)
  {