#define __SMCCC_WORKAROUND_1_SMC_SZ 36
 
+/*
+ * Encode a host-to-hyp call as an SMCCC fast call, 64-bit convention,
+ * in the vendor-specific hypervisor service owner range.
+ */
+#define KVM_HOST_SMCCC_ID(id)                                          \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+                          ARM_SMCCC_SMC_64,                            \
+                          ARM_SMCCC_OWNER_VENDOR_HYP,                  \
+                          (id))
+
+/* Map a hyp entry-point name to its SMCCC function ID. */
+#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
+
+/*
+ * Function IDs of the hyp entry points reachable from the host.  These
+ * constitute the host<->hyp ABI: keep the numbering stable, and add new
+ * entries at the end.
+ */
+#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                   0
+#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run                   1
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context           2
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa         3
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid             4
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid       5
+#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff          6
+#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs                        7
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2                8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr              9
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr             10
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs               11
+#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2               12
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs              13
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs           14
+
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
 
 #ifndef __ARM64_KVM_HOST_H__
 #define __ARM64_KVM_HOST_H__
 
+#include <linux/arm-smccc.h>
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
-u64 __kvm_call_hyp(void *hypfn, ...);
+/*
+ * Initialisation HVC: hands the hyp page-table base, stack pointer,
+ * vector base and TPIDR_EL2 value to EL2.  Implemented in assembly as a
+ * bare 'hvc #0'; the four arguments travel in x0-x3 per the AAPCS.
+ */
+u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr,
+                       unsigned long hyp_stack_ptr,
+                       unsigned long vector_ptr,
+                       unsigned long tpidr_el2);
 
-#define kvm_call_hyp_nvhe(f, ...)                                      \
-       do {                                                            \
-               DECLARE_KVM_NVHE_SYM(f);                                \
-               __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);    \
-       } while(0)
-
-#define kvm_call_hyp_nvhe_ret(f, ...)                                  \
+/*
+ * Call a nVHE hyp function by its SMCCC function ID.  The HVC returns
+ * the SMCCC status in a0 (anything other than SMCCC_RET_SUCCESS fires
+ * a WARN_ON) and the callee's return value in a1, which is what this
+ * statement expression evaluates to.
+ */
+#define kvm_call_hyp_nvhe(f, ...)                                              \
        ({                                                              \
-               DECLARE_KVM_NVHE_SYM(f);                                \
-               __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);    \
+               struct arm_smccc_res res;                               \
+                                                                       \
+               arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),               \
+                                 ##__VA_ARGS__, &res);                 \
+               WARN_ON(res.a0 != SMCCC_RET_SUCCESS);                   \
+                                                                       \
+               res.a1;                                                 \
        })
 
 /*
                        ret = f(__VA_ARGS__);                           \
                        isb();                                          \
                } else {                                                \
-                       ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);  \
+                       ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);      \
                }                                                       \
                                                                        \
                ret;                                                    \
 
         * cpus_have_const_cap() wrapper.
         */
        BUG_ON(!system_capabilities_finalized());
-       __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+       __kvm_call_hyp_init(pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
 
        /*
         * Disabling SSBD on a non-VHE system requires us to enable SSBS
 
 #include <asm/cpufeature.h>
 
 /*
- * u64 __kvm_call_hyp(void *hypfn, ...);
- *
- * This is not really a variadic function in the classic C-way and care must
- * be taken when calling this to ensure parameters are passed in registers
- * only, since the stack will change between the caller and the callee.
- *
- * Call the function with the first argument containing a pointer to the
- * function you wish to call in Hyp mode, and subsequent arguments will be
- * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
- * function pointer can be passed).  The function being called must be mapped
- * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in x0.
- *
- * A function pointer with a value less than 0xfff has a special meaning,
- * and is used to implement hyp stubs in the same way as in
- * arch/arm64/kernel/hyp_stub.S.
+ * u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr,
+ *                        unsigned long hyp_stack_ptr,
+ *                        unsigned long vector_ptr,
+ *                        unsigned long tpidr_el2);
+ *
+ * The four AAPCS arguments stay untouched in x0-x3 across the trap;
+ * NOTE(review): presumably consumed by the EL2 init vector — confirm
+ * against the hyp-init entry code.  The result comes back in x0.
  */
-SYM_FUNC_START(__kvm_call_hyp)
+SYM_FUNC_START(__kvm_call_hyp_init)
        hvc     #0
        ret
-SYM_FUNC_END(__kvm_call_hyp)
+SYM_FUNC_END(__kvm_call_hyp_init)
 
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-typedef unsigned long (*hypcall_fn_t)
-       (unsigned long, unsigned long, unsigned long);
+#include <kvm/arm_hypercalls.h>
+
+/*
+ * Dispatch a host HVC to the corresponding hyp entry point.
+ *
+ * The SMCCC function ID arrives in x0 and the arguments in x1..x3.
+ * Pointers passed by the host are kernel VAs and must be translated
+ * with kern_hyp_va() before being dereferenced at EL2, where the
+ * kernel's linear mapping is not in place.  (The function-pointer
+ * scheme this replaces performed the same conversion on its target.)
+ *
+ * On return, x0 carries the SMCCC status and x1 the callee's return
+ * value, matching what the kvm_call_hyp_nvhe() caller expects.
+ */
+static void handle_host_hcall(unsigned long func_id,
+                             struct kvm_cpu_context *host_ctxt)
+{
+       unsigned long ret = 0;
+
+       switch (func_id) {
+       case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): {
+               unsigned long r1 = host_ctxt->regs.regs[1];
+               struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1;
+
+               ret = __kvm_vcpu_run(kern_hyp_va(vcpu));
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
+               __kvm_flush_vm_context();
+               break;
+       case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): {
+               unsigned long r1 = host_ctxt->regs.regs[1];
+               struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+               phys_addr_t ipa = host_ctxt->regs.regs[2];
+               int level = host_ctxt->regs.regs[3];
+
+               __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
+               unsigned long r1 = host_ctxt->regs.regs[1];
+               struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+
+               __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
+               unsigned long r1 = host_ctxt->regs.regs[1];
+               struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+
+               __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
+               u64 cntvoff = host_ctxt->regs.regs[1];
+
+               __kvm_timer_set_cntvoff(cntvoff);
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs):
+               __kvm_enable_ssbs();
+               break;
+       case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2):
+               ret = __vgic_v3_get_ich_vtr_el2();
+               break;
+       case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr):
+               ret = __vgic_v3_read_vmcr();
+               break;
+       case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): {
+               u32 vmcr = host_ctxt->regs.regs[1];
+
+               __vgic_v3_write_vmcr(vmcr);
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs):
+               __vgic_v3_init_lrs();
+               break;
+       case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2):
+               ret = __kvm_get_mdcr_el2();
+               break;
+       case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): {
+               unsigned long r1 = host_ctxt->regs.regs[1];
+               struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
+
+               __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+               break;
+       }
+       case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
+               unsigned long r1 = host_ctxt->regs.regs[1];
+               struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
+
+               __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+               break;
+       }
+       default:
+               /* Invalid host HVC. */
+               host_ctxt->regs.regs[0] = SMCCC_RET_NOT_SUPPORTED;
+               return;
+       }
+
+       host_ctxt->regs.regs[0] = SMCCC_RET_SUCCESS;
+       host_ctxt->regs.regs[1] = ret;
+}
 
+/* EL2 trap handler: only host HVC64 exceptions are expected; any other
+ * exception class is fatal. */
 void handle_trap(struct kvm_cpu_context *host_ctxt)
 {
        u64 esr = read_sysreg_el2(SYS_ESR);
-       hypcall_fn_t func;
-       unsigned long ret;
+       unsigned long func_id;
 
        if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
                hyp_panic();
 
-       /*
-        * __kvm_call_hyp takes a pointer in the host address space and
-        * up to three arguments.
-        */
-       func = (hypcall_fn_t)kern_hyp_va(host_ctxt->regs.regs[0]);
-       ret = func(host_ctxt->regs.regs[1],
-                  host_ctxt->regs.regs[2],
-                  host_ctxt->regs.regs[3]);
-       host_ctxt->regs.regs[0] = ret;
+       /* Per SMCCC, the host passes the function ID in x0. */
+       func_id = host_ctxt->regs.regs[0];
+       handle_host_hcall(func_id, host_ctxt);
 }