return val;
 }
 
-static inline u64 native_read_msr_safe(u32 msr, int *err)
+static inline int native_read_msr_safe(u32 msr, u64 *p)
 {
+       int err;
        EAX_EDX_DECLARE_ARGS(val, low, high);
 
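+       /*
+        * On a faulting RDMSR the fixup branches to 2: with err already
+        * set to -EIO; the XOR on the success path clears err to zero.
+        */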
        asm volatile("1: rdmsr ; xor %[err],%[err]\n"
                     "2:\n\t"
                     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
-                    : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+                    : [err] "=r" (err), EAX_EDX_RET(val, low, high)
                     : "c" (msr));
        if (tracepoint_enabled(read_msr))
-               do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
-       return EAX_EDX_VAL(val, low, high);
+               do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);
+
+       *p = EAX_EDX_VAL(val, low, high);
+
+       return err;
 }
 
 /* Can be uninlined because referenced by paravirt */
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr, low, high)                             \
 ({                                                             \
-       int __err;                                              \
-       u64 __val = native_read_msr_safe((msr), &__err);        \
+       u64 __val;                                              \
+       int __err = native_read_msr_safe((msr), &__val);        \
        (*low) = (u32)__val;                                    \
        (*high) = (u32)(__val >> 32);                           \
        __err;                                                  \
 })
 
 static inline int rdmsrq_safe(u32 msr, u64 *p)
 {
-       int err;
-
-       *p = native_read_msr_safe(msr, &err);
-       return err;
+       return native_read_msr_safe(msr, p);
 }
 
 static __always_inline u64 rdpmc(int counter)
 
        PVOP_VCALL1(cpu.write_cr4, x);
 }
 
-static inline u64 paravirt_read_msr(unsigned msr)
+static inline u64 paravirt_read_msr(u32 msr)
 {
        return PVOP_CALL1(u64, cpu.read_msr, msr);
 }
        PVOP_VCALL2(cpu.write_msr, msr, val);
 }
 
-static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
+static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
 {
-       return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
+       return PVOP_CALL2(int, cpu.read_msr_safe, msr, val);
 }
 
 static inline int paravirt_write_msr_safe(u32 msr, u64 val)
 /* rdmsr with exception handling */
 #define rdmsr_safe(msr, a, b)                          \
 ({                                                     \
-       int _err;                                       \
-       u64 _l = paravirt_read_msr_safe(msr, &_err);    \
+       u64 _l;                                         \
+       int _err = paravirt_read_msr_safe((msr), &_l);  \
        (*a) = (u32)_l;                                 \
-       (*b) = _l >> 32;                                \
+       (*b) = (u32)(_l >> 32);                         \
        _err;                                           \
 })
 
-static inline int rdmsrq_safe(unsigned msr, u64 *p)
+static __always_inline int rdmsrq_safe(u32 msr, u64 *p)
 {
-       int err;
-
-       *p = paravirt_read_msr_safe(msr, &err);
-       return err;
+       return paravirt_read_msr_safe(msr, p);
 }
 
 static __always_inline u64 rdpmc(int counter)
 
                      unsigned int *ecx, unsigned int *edx);
 
        /* Unsafe MSR operations.  These will warn or panic on failure. */
-       u64 (*read_msr)(unsigned int msr);
+       u64 (*read_msr)(u32 msr);
        void (*write_msr)(u32 msr, u64 val);
 
        /*
         * Safe MSR operations.
-        * read sets err to 0 or -EIO.  write returns 0 or -EIO.
+        * Both return 0 or -EIO.
         */
-       u64 (*read_msr_safe)(unsigned int msr, int *err);
+       int (*read_msr_safe)(u32 msr, u64 *val);
        int (*write_msr_safe)(u32 msr, u64 val);
 
        u64 (*read_pmc)(int counter);
 
 
 static void svm_init_erratum_383(void)
 {
-       int err;
        u64 val;
 
        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;
 
        /* Use _safe variants to not break nested virtualization */
-       val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
-       if (err)
+       if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
                return;
 
        val |= (1ULL << 47);
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
-               uint64_t len, status = 0;
+               u64 len, status = 0;
                int err;
 
-               len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
+               err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
                if (!err)
-                       status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
-                                                     &err);
+                       err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
 
                if (err)
                        osvw_status = osvw_len = 0;
 
 static bool is_erratum_383(void)
 {
-       int err, i;
+       int i;
        u64 value;
 
        if (!erratum_383_found)
                return false;
 
-       value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
-       if (err)
+       if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
                return false;
 
        /* Bit 62 may or may not be set for this mce */
        for (i = 0; i < 6; ++i)
                native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
 
-       value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
-       if (!err) {
+       if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
                value &= ~(1ULL << 2);
                native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
        }
 
        native_write_cr4(cr4);
 }
 
-static u64 xen_do_read_msr(unsigned int msr, int *err)
+static u64 xen_do_read_msr(u32 msr, int *err)
 {
        u64 val = 0;    /* Avoid uninitialized value for safe variant. */
 
                return val;
 
        if (err)
-               val = native_read_msr_safe(msr, err);
+               *err = native_read_msr_safe(msr, &val);
        else
                val = native_read_msr(msr);
 
        }
 }
 
-static u64 xen_read_msr_safe(unsigned int msr, int *err)
+static int xen_read_msr_safe(u32 msr, u64 *val)
 {
-       return xen_do_read_msr(msr, err);
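+       /* xen_do_read_msr() keeps the err-pointer style; adapt it here. */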
+       int err;
+
+       *val = xen_do_read_msr(msr, &err);
+       return err;
 }
 
 static int xen_write_msr_safe(u32 msr, u64 val)
        return err;
 }
 
-static u64 xen_read_msr(unsigned int msr)
+static u64 xen_read_msr(u32 msr)
 {
        int err;
 
 
        uint8_t xenpmu_flags = get_xenpmu_flags();
 
        if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-               uint32_t msr;
-               int err;
+               u32 msr;
+               u64 val;
 
                msr = amd_counters_base + (counter * amd_msr_step);
-               return native_read_msr_safe(msr, &err);
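+               /* Error deliberately ignored, matching the old behaviour. */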
+               native_read_msr_safe(msr, &val);
+               return val;
        }
 
        ctxt = &xenpmu_data->pmu.c.amd;
        uint8_t xenpmu_flags = get_xenpmu_flags();
 
        if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-               uint32_t msr;
-               int err;
+               u32 msr;
+               u64 val;
 
                if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
                        msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
                else
                        msr = MSR_IA32_PERFCTR0 + counter;
 
-               return native_read_msr_safe(msr, &err);
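+               /* Error deliberately ignored, as in the AMD case above. */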
+               native_read_msr_safe(msr, &val);
+               return val;
        }
 
        ctxt = &xenpmu_data->pmu.c.intel;