static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 {
        int rd, rj;
-       unsigned int index;
+       unsigned int index, ret;
 
        if (inst.reg2_format.opcode != cpucfg_op)
                return EMULATE_FAIL;
                vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
                break;
        case CPUCFG_KVM_FEATURE:
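+               /* Report the set of PV features supported by this host */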
-               vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
+               ret = KVM_FEATURE_IPI;
+               if (kvm_pvtime_supported())
+                       ret |= KVM_FEATURE_STEAL_TIME;
+               vcpu->arch.gprs[rd] = ret;
                break;
        default:
                vcpu->arch.gprs[rd] = 0;
        return RESUME_GUEST;
 }
 
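+/*
+ * Handle the KVM_HCALL_FUNC_NOTIFY hypercall: the guest passes a
+ * feature id in register A1 and feature-specific data in A2. For
+ * KVM_FEATURE_STEAL_TIME, the data is the GPA of the guest's
+ * struct kvm_steal_time, with KVM_STEAL_PHYS_VALID marking it usable.
+ */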
+static long kvm_save_notify(struct kvm_vcpu *vcpu)
+{
+       unsigned long id, data;
+
+       id   = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+       data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+       switch (id) {
+       case KVM_FEATURE_STEAL_TIME:
+               if (!kvm_pvtime_supported())
+                       return KVM_HCALL_INVALID_CODE;
+
+               if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+                       return KVM_HCALL_INVALID_PARAMETER;
+
+               vcpu->arch.st.guest_addr = data;
+               if (!(data & KVM_STEAL_PHYS_VALID))
+                       break;
+
+               vcpu->arch.st.last_steal = current->sched_info.run_delay;
+               kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
 /*
  * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
  * @vcpu:      Virtual CPU context.
                kvm_send_pv_ipi(vcpu);
                ret = KVM_HCALL_SUCCESS;
                break;
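+       /* The guest registers a PV feature setting, e.g. its steal time GPA */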
+       case KVM_HCALL_FUNC_NOTIFY:
+               ret = kvm_save_notify(vcpu);
+               break;
        default:
                ret = KVM_HCALL_INVALID_CODE;
                break;
 
                       sizeof(kvm_vcpu_stats_desc),
 };
 
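+/*
+ * Publish accumulated steal time to the guest's kvm_steal_time area.
+ * The version field acts like a seqcount: it is made odd before the
+ * record is updated and even again afterwards, so the guest can detect
+ * and retry a torn read. Steal time itself is accumulated from the
+ * delta in current->sched_info.run_delay since the last update.
+ */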
+static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+       u32 version;
+       u64 steal;
+       gpa_t gpa;
+       struct kvm_memslots *slots;
+       struct kvm_steal_time __user *st;
+       struct gfn_to_hva_cache *ghc;
+
+       ghc = &vcpu->arch.st.cache;
+       gpa = vcpu->arch.st.guest_addr;
+       if (!(gpa & KVM_STEAL_PHYS_VALID))
+               return;
+
+       gpa &= KVM_STEAL_PHYS_MASK;
+       slots = kvm_memslots(vcpu->kvm);
+       if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+                       ghc->gpa = INVALID_GPA;
+                       return;
+               }
+       }
+
+       st = (struct kvm_steal_time __user *)ghc->hva;
+       unsafe_get_user(version, &st->version, out);
+       if (version & 1)
+               version += 1; /* first time write, random junk */
+
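+       /* Make the version odd: signals the guest that an update is in flight */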
+       version += 1;
+       unsafe_put_user(version, &st->version, out);
+       smp_wmb();
+
+       unsafe_get_user(steal, &st->steal, out);
+       steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
+       vcpu->arch.st.last_steal = current->sched_info.run_delay;
+       unsafe_put_user(steal, &st->steal, out);
+
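+       /* Order the steal-time store before making the version even again */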
+       smp_wmb();
+       version += 1;
+       unsafe_put_user(version, &st->version, out);
+out:
+       mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
 /*
  * kvm_check_requests - check and handle pending vCPU requests
  *
        if (kvm_dirty_ring_check_request(vcpu))
                return RESUME_HOST;
 
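+       /* Write back steal time if the guest has registered a record */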
+       if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+               kvm_update_stolen_time(vcpu);
+
        return RESUME_GUEST;
 }
 
        return -ENXIO;
 }
 
+static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
+                                        struct kvm_device_attr *attr)
+{
+       if (!kvm_pvtime_supported() ||
+                       attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+               return -ENXIO;
+
+       return 0;
+}
+
 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
 {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
                break;
+       case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+               ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+               break;
        default:
                break;
        }
        return ret;
 }
 
-static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
 {
        int ret = 0;
        return ret;
 }
 
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+                                        struct kvm_device_attr *attr)
+{
+       u64 gpa;
+       u64 __user *user = (u64 __user *)attr->addr;
+
+       if (!kvm_pvtime_supported() ||
+                       attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+               return -ENXIO;
+
+       gpa = vcpu->arch.st.guest_addr;
+       if (put_user(gpa, user))
+               return -EFAULT;
+
+       return 0;
+}
+
 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
 {
 
        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
-               ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+               ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
+               break;
+       case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+               ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
                break;
        default:
                break;
        return -ENXIO;
 }
 
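+/*
+ * Userspace sets the steal time GPA through the
+ * KVM_LOONGARCH_VCPU_PVTIME_CTRL attribute group, e.g. when restoring
+ * vCPU state after migration. Clearing KVM_STEAL_PHYS_VALID disables
+ * steal time accounting for the vCPU.
+ */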
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+                                        struct kvm_device_attr *attr)
+{
+       int idx, ret = 0;
+       u64 gpa, __user *user = (u64 __user *)attr->addr;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (!kvm_pvtime_supported() ||
+                       attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+               return -ENXIO;
+
+       if (get_user(gpa, user))
+               return -EFAULT;
+
+       if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+               return -EINVAL;
+
+       if (!(gpa & KVM_STEAL_PHYS_VALID)) {
+               vcpu->arch.st.guest_addr = gpa;
+               return 0;
+       }
+
+       /* Check the address is in a valid memslot */
+       idx = srcu_read_lock(&kvm->srcu);
+       if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+               ret = -EINVAL;
+       srcu_read_unlock(&kvm->srcu, idx);
+
+       if (!ret) {
+               vcpu->arch.st.guest_addr = gpa;
+               vcpu->arch.st.last_steal = current->sched_info.run_delay;
+               kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+       }
+
+       return ret;
+}
+
 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
 {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
                break;
+       case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+               ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+               break;
        default:
                break;
        }
 
        /* Control guest page CCA attribute */
        change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
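+       /* Have the steal time record refreshed before reentering the guest */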
+       kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
        /* Don't bother restoring registers multiple times unless necessary */
        if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)