enum kvm_riscv_hfence_type {
        KVM_RISCV_HFENCE_UNKNOWN = 0,
        KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+       KVM_RISCV_HFENCE_GVMA_VMID_ALL,
        KVM_RISCV_HFENCE_VVMA_ASID_GVA,
        KVM_RISCV_HFENCE_VVMA_ASID_ALL,
        KVM_RISCV_HFENCE_VVMA_GVA,
+       KVM_RISCV_HFENCE_VVMA_ALL
 };
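
Both new types describe whole-address-space fences, so only the vmid field of the request is meaningful for them. As a hedged illustration, a caller deciding between the ranged and the all variants might do something like this (pick_gvma_type() is a hypothetical helper, not part of the patch):

/* Hypothetical helper: treat an empty range as "flush the whole VMID". */
static enum kvm_riscv_hfence_type pick_gvma_type(gpa_t gpa, gpa_t gpsz)
{
        if (!gpa && !gpsz)
                return KVM_RISCV_HFENCE_GVMA_VMID_ALL;
        return KVM_RISCV_HFENCE_GVMA_VMID_GPA;
}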
 
 struct kvm_riscv_hfence {
        enum kvm_riscv_hfence_type type;
        unsigned long asid;
        unsigned long vmid;
        unsigned long order;
        gpa_t addr;
        gpa_t size;
 };

 void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
-                                   unsigned long order);
+                                   unsigned long order, unsigned long vmid);
 void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-                                   unsigned long hbase, unsigned long hmask);
+                                   unsigned long hbase, unsigned long hmask,
+                                   unsigned long vmid);
 void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
-                                   unsigned long order, unsigned long asid);
+                                   unsigned long order, unsigned long asid,
+                                   unsigned long vmid);
 void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
-                                   unsigned long asid);
+                                   unsigned long asid, unsigned long vmid);
 void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
-                              unsigned long order);
+                              unsigned long order, unsigned long vmid);
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-                              unsigned long hbase, unsigned long hmask);
+                              unsigned long hbase, unsigned long hmask,
+                              unsigned long vmid);
 
 #endif
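
All six prototypes now take the VMID explicitly instead of reading kvm->arch.vmid behind the caller's back. A minimal usage sketch, assuming the caller wants the VM's current host VMID (flush_one_guest_page() is hypothetical; the -1UL/0 hbase/hmask pair means "all VCPUs", matching the callers below):

static void flush_one_guest_page(struct kvm *kvm, gpa_t gpa)
{
        unsigned long vmid = READ_ONCE(kvm->arch.vmid.vmid);

        /* Fence a single page of guest-physical mappings on every hart. */
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, PAGE_SIZE,
                                       PAGE_SHIFT, vmid);
}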
 
        if (gstage->flags & KVM_GSTAGE_FLAGS_LOCAL)
                kvm_riscv_local_hfence_gvma_vmid_gpa(gstage->vmid, addr, BIT(order), order);
        else
-               kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order);
+               kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order,
+                                              gstage->vmid);
 }
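
The flag decides between a hart-local fence and a VM-wide broadcast; the G-stage walker itself no longer cares where the VMID came from. A sketch of how a local-only user might set up the descriptor (field names follow this series' struct kvm_gstage; other members omitted):

        struct kvm_gstage gstage = {
                .kvm   = kvm,
                .flags = KVM_GSTAGE_FLAGS_LOCAL,  /* fence current hart only */
                .vmid  = READ_ONCE(kvm->arch.vmid.vmid),
        };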
 
 int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
 
                                kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
                                                                     d.size, d.order);
                        break;
+               case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
+                       else
+                               kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
+                       break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
                                                                     d.size, d.order);
                        break;
+               case KVM_RISCV_HFENCE_VVMA_ALL:
+                       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
+                       if (kvm_riscv_nacl_available())
+                               nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
+                       else
+                               kvm_riscv_local_hfence_vvma_all(d.vmid);
+                       break;
                default:
                        break;
                }
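
These cases run from the per-VCPU request handler, which drains a small queue of kvm_riscv_hfence entries. A simplified sketch of the surrounding loop (the real function in this file follows the same shape; vcpu_hfence_dequeue() is the queue helper it already uses):

void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
        struct kvm_riscv_hfence d = { 0 };

        while (vcpu_hfence_dequeue(vcpu, &d)) {
                switch (d.type) {
                /* ... the cases shown above ... */
                default:
                        break;
                }
        }
}
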
 void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
-                                   unsigned long order)
+                                   unsigned long order, unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
        struct kvm_riscv_hfence data;
 
        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
-       data.vmid = READ_ONCE(v->vmid);
+       data.vmid = vmid;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_TLB_FLUSH, &data);
 }
 
 void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-                                   unsigned long hbase, unsigned long hmask)
+                                   unsigned long hbase, unsigned long hmask,
+                                   unsigned long vmid)
 {
-       make_xfence_request(kvm, hbase, hmask, KVM_REQ_TLB_FLUSH,
-                           KVM_REQ_TLB_FLUSH, NULL);
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_TLB_FLUSH, &data);
 }
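
Because the VMID is now a parameter, a caller can fence a VMID other than the VM's current one, which is what VMID recycling and nested virtualization need. A hypothetical user (drop_stale_vmid() is illustrative, not part of the patch):

static void drop_stale_vmid(struct kvm *kvm, unsigned long old_vmid)
{
        /* Invalidate every G-stage translation still tagged old_vmid. */
        kvm_riscv_hfence_gvma_vmid_all(kvm, -1UL, 0, old_vmid);
}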
 
 void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
-                                   unsigned long order, unsigned long asid)
+                                   unsigned long order, unsigned long asid,
+                                   unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
        struct kvm_riscv_hfence data;
 
        data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
        data.asid = asid;
-       data.vmid = READ_ONCE(v->vmid);
+       data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
 }

 void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
-                                   unsigned long asid)
+                                   unsigned long asid, unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
-       struct kvm_riscv_hfence data;
+       struct kvm_riscv_hfence data = {0};
 
        data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
        data.asid = asid;
-       data.vmid = READ_ONCE(v->vmid);
-       data.addr = data.size = data.order = 0;
+       data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
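
A usage sketch pairing a guest ASID with the VM's current VMID, mirroring the SBI handler further down (guest_asid is assumed to be in scope):

        unsigned long vmid = READ_ONCE(kvm->arch.vmid.vmid);

        /* Drop all VS-stage translations for one guest ASID on every hart. */
        kvm_riscv_hfence_vvma_asid_all(kvm, -1UL, 0, guest_asid, vmid);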
 void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
-                              unsigned long order)
+                              unsigned long order, unsigned long vmid)
 {
-       struct kvm_vmid *v = &kvm->arch.vmid;
        struct kvm_riscv_hfence data;
 
        data.type = KVM_RISCV_HFENCE_VVMA_GVA;
        data.asid = 0;
-       data.vmid = READ_ONCE(v->vmid);
+       data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
 
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-                              unsigned long hbase, unsigned long hmask)
+                              unsigned long hbase, unsigned long hmask,
+                              unsigned long vmid)
 {
-       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
-                           KVM_REQ_HFENCE_VVMA_ALL, NULL);
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_HFENCE_VVMA_ALL;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
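
Note the two request arguments: KVM_REQ_HFENCE carries the queued data, while KVM_REQ_HFENCE_VVMA_ALL is the coarser fallback used when a VCPU's queue cannot take the entry. A simplified sketch of that fallback inside make_xfence_request() (illustrative of the existing helper, not new behavior):

        /* Per VCPU: queue the precise fence, or fall back to the blunt one. */
        if (!data || vcpu_hfence_enqueue(vcpu, data))
                actual_req = fallback_req;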
 
 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
 {
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
                                       gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
-                                      PAGE_SHIFT);
+                                      PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
        return 0;
 }
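
A worked instance of the unit conversion this hook performs: for gfn 0x1000 and nr_pages 1 it issues a single 4 KiB fence (values shown purely for illustration):

        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
                                       0x1000UL << PAGE_SHIFT, /* gpa  = 0x1000000 */
                                       1UL << PAGE_SHIFT,      /* size = 4 KiB */
                                       PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));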
 
        unsigned long hmask = cp->a0;
        unsigned long hbase = cp->a1;
        unsigned long funcid = cp->a6;
+       unsigned long vmid;
 
        switch (funcid) {
        case SBI_EXT_RFENCE_REMOTE_FENCE_I:
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+               vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
-                       kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
+                       kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
                else
                        kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
-                                                 cp->a2, cp->a3, PAGE_SHIFT);
+                                                 cp->a2, cp->a3, PAGE_SHIFT, vmid);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+               vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
-                       kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
-                                                      hbase, hmask, cp->a4);
+                       kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+                                                      cp->a4, vmid);
                else
-                       kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
-                                                      hbase, hmask,
-                                                      cp->a2, cp->a3,
-                                                      PAGE_SHIFT, cp->a4);
+                       kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
+                                                      cp->a3, PAGE_SHIFT, cp->a4, vmid);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
 
        struct kvm *kvm = vcpu->kvm;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        struct kvm_cpu_trap *utrap = retdata->utrap;
+       unsigned long vmid;
 
        switch (cp->a7) {
        case SBI_EXT_0_1_CONSOLE_GETCHAR:
                if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
                        kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
                else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
+                       vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                        if (cp->a1 == 0 && cp->a2 == 0)
-                               kvm_riscv_hfence_vvma_all(vcpu->kvm,
-                                                         0, hmask);
+                               kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
                        else
-                               kvm_riscv_hfence_vvma_gva(vcpu->kvm,
-                                                         0, hmask,
-                                                         cp->a1, cp->a2,
-                                                         PAGE_SHIFT);
+                               kvm_riscv_hfence_vvma_gva(vcpu->kvm, 0, hmask, cp->a1,
+                                                         cp->a2, PAGE_SHIFT, vmid);
                } else {
+                       vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
                        if (cp->a1 == 0 && cp->a2 == 0)
-                               kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
-                                                              0, hmask,
-                                                              cp->a3);
+                               kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, 0, hmask,
+                                                              cp->a3, vmid);
                        else
-                               kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
-                                                              0, hmask,
-                                                              cp->a1, cp->a2,
-                                                              PAGE_SHIFT,
-                                                              cp->a3);
+                               kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, 0, hmask,
+                                                              cp->a1, cp->a2, PAGE_SHIFT,
+                                                              cp->a3, vmid);
                }
                break;
        default: