                uint64_t reg_id = raz_wi_reg_ids[i];
                uint64_t val;
 
-               vcpu_get_reg(vcpu, reg_id, &val);
+               val = vcpu_get_reg(vcpu, reg_id);
                TEST_ASSERT_EQ(val, 0);
 
                /*
                 * Expect the write to be accepted but ignored: the register
                 * is RAZ/WI, so it must still read back as zero.
                 */
                vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
 
-               vcpu_get_reg(vcpu, reg_id, &val);
+               val = vcpu_get_reg(vcpu, reg_id);
                TEST_ASSERT_EQ(val, 0);
        }
 }
                uint64_t reg_id = raz_invariant_reg_ids[i];
                uint64_t val;
 
-               vcpu_get_reg(vcpu, reg_id, &val);
+               val = vcpu_get_reg(vcpu, reg_id);
                TEST_ASSERT_EQ(val, 0);
 
                r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
                TEST_ASSERT(r < 0 && errno == EINVAL,
                            "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 
-               vcpu_get_reg(vcpu, reg_id, &val);
+               val = vcpu_get_reg(vcpu, reg_id);
                TEST_ASSERT_EQ(val, 0);
        }
 }
 {
        uint64_t val, el0;
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
+       val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 
        el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
        return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;
 
                TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
 
                /* Check if the current pc is expected. */
-               vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
+               pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
                TEST_ASSERT(!test_pc || pc == test_pc,
                            "Unexpected pc 0x%lx (expected 0x%lx)",
                            pc, test_pc);
        uint64_t aa64dfr0;
 
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
+       aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
        __TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
                       "Armv8 debug architecture not supported.");
        kvm_vm_free(vm);
 
                const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
 
                /* First 'read' should be an upper limit of the features supported */
-               vcpu_get_reg(vcpu, reg_info->reg, &val);
+               val = vcpu_get_reg(vcpu, reg_info->reg);
                TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
                        "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
                        reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
                        "Failed to clear all the features of reg: 0x%lx; ret: %d",
                        reg_info->reg, errno);
 
-               vcpu_get_reg(vcpu, reg_info->reg, &val);
+               val = vcpu_get_reg(vcpu, reg_info->reg);
                TEST_ASSERT(val == 0,
                        "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
 
                 * Before starting the VM, the test clears all the bits.
                 * Check if that's still the case.
                 */
-               vcpu_get_reg(vcpu, reg_info->reg, &val);
+               val = vcpu_get_reg(vcpu, reg_info->reg);
                TEST_ASSERT(val == 0,
                        "Expected all the features to be cleared for reg: 0x%lx",
                        reg_info->reg);
 
        uint64_t pfr0;
 
        vm = vm_create_with_one_vcpu(&vcpu, NULL);
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &pfr0);
+       pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
        __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
                       "GICv3 not supported.");
        kvm_vm_free(vm);
 
 {
        uint64_t obs_pc, obs_x0;
 
-       vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
-       vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
+       obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
+       obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]));
 
        TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
                    "unexpected target cpu pc: %lx (expected: %lx)",
         */
        vcpu_power_off(target);
 
-       vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
+       target_mpidr = vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
        vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
        enter_guest(source);
 
 
        setup_vm(guest_test_system_off2, &source, &target);
 
-       vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION, &psci_version);
+       psci_version = vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION);
 
        TEST_ASSERT(psci_version >= PSCI_VERSION(1, 3),
                    "Unexpected PSCI version %lu.%lu",
 
        uint64_t mask = ftr_bits->mask;
        uint64_t val, new_val, ftr;
 
-       vcpu_get_reg(vcpu, reg, &val);
+       val = vcpu_get_reg(vcpu, reg);
        ftr = (val & mask) >> shift;
 
        ftr = get_safe_value(ftr_bits, ftr);
        val |= ftr;
 
        vcpu_set_reg(vcpu, reg, val);
-       vcpu_get_reg(vcpu, reg, &new_val);
+       new_val = vcpu_get_reg(vcpu, reg);
        TEST_ASSERT_EQ(new_val, val);
 
        return new_val;
        uint64_t val, old_val, ftr;
        int r;
 
-       vcpu_get_reg(vcpu, reg, &val);
+       val = vcpu_get_reg(vcpu, reg);
        ftr = (val & mask) >> shift;
 
        ftr = get_invalid_value(ftr_bits, ftr);
        TEST_ASSERT(r < 0 && errno == EINVAL,
                    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 
-       vcpu_get_reg(vcpu, reg, &val);
+       val = vcpu_get_reg(vcpu, reg);
        TEST_ASSERT_EQ(val, old_val);
 }
 
        }
 
        /* Get the id register value */
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
+       val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 
        /* Try to set MPAM=0. This should always be possible. */
        val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
        }
 
        /* Get the id register value */
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), &val);
+       val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 
        /* Try to set MPAM_frac=0. This should always be possible. */
        val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
        uint64_t clidr;
        int level;
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), &clidr);
+       clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
 
        /* find the first empty level in the cache hierarchy */
        for (level = 1; level < 7; level++) {
 {
        u64 ctr;
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), &ctr);
+       ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0));
        ctr &= ~CTR_EL0_DIC_MASK;
        if (ctr & CTR_EL0_IminLine_MASK)
                ctr--;
        test_clidr(vcpu);
        test_ctr(vcpu);
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
+       val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
        val++;
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
 
        size_t idx = encoding_to_range_idx(encoding);
        uint64_t observed;
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding), &observed);
+       observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
        TEST_ASSERT_EQ(test_reg_vals[idx], observed);
 }
 
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
        /* Check for AARCH64 only system */
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
+       val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
        el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
        aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 
                       "Failed to create vgic-v3, skipping");
 
        /* Make sure that PMUv3 support is indicated in the ID register */
-       vcpu_get_reg(vpmu_vm.vcpu,
-                    KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
+       dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
        pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
        TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
                    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
        create_vpmu_vm(guest_code);
        vcpu = vpmu_vm.vcpu;
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
+       pmcr_orig = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
        pmcr = pmcr_orig;
 
        /*
         */
        set_pmcr_n(&pmcr, pmcr_n);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+       pmcr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
 
        if (expect_fail)
                TEST_ASSERT(pmcr_orig == pmcr,
        vcpu = vpmu_vm.vcpu;
 
        /* Save the initial sp to restore them later to run the guest again */
-       vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
+       sp = vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1));
 
        run_vcpu(vcpu, pmcr_n);
 
                 * Test if the 'set' and 'clr' variants of the registers
                 * are initialized based on the number of valid counters.
                 */
-               vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
+               reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(set_reg_id), reg_val);
 
-               vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
+               reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
                 */
                vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);
 
-               vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
+               reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(set_reg_id), reg_val);
 
-               vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
+               reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
        uint64_t pmcr;
 
        create_vpmu_vm(guest_code);
-       vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+       pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
        destroy_vpmu_vm();
        return get_pmcr_n(pmcr);
 }
 
 
        return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
 }
-static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
 {
-       struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+       uint64_t val;
+       struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
 
        vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
+       return val;
 }
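
[Illustrative sketch, not part of the patch: a hypothetical call site, assuming the selftests' ARM64_CORE_REG() and pr_info() helpers are in scope, showing how a register read collapses to a single expression with the value-returning vcpu_get_reg().]

static void example_dump_pc(struct kvm_vcpu *vcpu)
{
        /* Read the guest PC directly; no out-parameter temporary is needed. */
        uint64_t pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));

        pr_info("guest pc: 0x%lx\n", pc);
}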
 static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
 {
 
         */
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
 
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
-       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
+       sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1));
+       tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1));
 
        /* Configure base granule size */
        switch (vm->mode) {
 {
        uint64_t pstate, pc;
 
-       vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
-       vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
+       pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
+       pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
 
        fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
                indent, "", pstate, pc);
 
 {
        struct kvm_riscv_core core;
 
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5);
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6);
+       core.mode = vcpu_get_reg(vcpu, RISCV_CORE_REG(mode));
+       core.regs.pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc));
+       core.regs.ra = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra));
+       core.regs.sp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp));
+       core.regs.gp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp));
+       core.regs.tp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp));
+       core.regs.t0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0));
+       core.regs.t1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1));
+       core.regs.t2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2));
+       core.regs.s0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0));
+       core.regs.s1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1));
+       core.regs.a0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0));
+       core.regs.a1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1));
+       core.regs.a2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2));
+       core.regs.a3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3));
+       core.regs.a4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4));
+       core.regs.a5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5));
+       core.regs.a6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6));
+       core.regs.a7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7));
+       core.regs.s2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2));
+       core.regs.s3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3));
+       core.regs.s4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4));
+       core.regs.s5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5));
+       core.regs.s6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6));
+       core.regs.s7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7));
+       core.regs.s8 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8));
+       core.regs.s9 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9));
+       core.regs.s10 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10));
+       core.regs.s11 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11));
+       core.regs.t3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3));
+       core.regs.t4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4));
+       core.regs.t5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5));
+       core.regs.t6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6));
 
        fprintf(stream,
                " MODE:  0x%lx\n", core.mode);
 
                vcpu_init_vector_tables(vcpus[i]);
 
        /* Initialize guest timer frequency. */
-       vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq);
+       timer_freq = vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency));
        sync_global_to_guest(vm, timer_freq);
        pr_debug("timer_freq: %lu\n", timer_freq);
 
 
 
        TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG);
 
-       vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &pc);
+       pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc));
        TEST_ASSERT_EQ(pc, LABEL_ADDRESS(sw_bp_1));
 
        /* skip sw_bp_1 */
 
 
        vcpu_init_vector_tables(vcpu);
        /* Initialize guest timer frequency. */
-       vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq);
+       timer_freq = vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency));
        sync_global_to_guest(vm, timer_freq);
 
        run_vcpu(vcpu);
 
 {
        uint64_t eval_reg;
 
-       vcpu_get_reg(vcpu, id, &eval_reg);
+       eval_reg = vcpu_get_reg(vcpu, id);
        TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
 }
 
 
 static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
 {
        uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
-       unsigned long enabled;
+       unsigned long enabled = vcpu_get_reg(vcpu, id);
 
-       vcpu_get_reg(vcpu, id, &enabled);
        TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");
 
        return enabled;