        return true;
 }
 
-static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
-                                         bool enc)
+static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
+                                        bool enc)
 {
        /*
         * Only handle shared->private conversion here.
         * See the comment in tdx_early_init().
         */
-       if (enc)
-               return tdx_enc_status_changed(vaddr, numpages, enc);
-       return true;
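+       /* tdx_enc_status_changed() reports bare success/failure; map failure to -EIO. */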
+       if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+               return -EIO;
+
+       return 0;
 }
 
-static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
+static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
                                         bool enc)
 {
        /*
         * Only handle private->shared conversion here.
         * See the comment in tdx_early_init().
         */
-       if (!enc)
-               return tdx_enc_status_changed(vaddr, numpages, enc);
-       return true;
+       if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+               return -EIO;
+
+       return 0;
 }
 
 void __init tdx_early_init(void)
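
These callbacks are installed here; a minimal sketch of the relevant
assignments, abridged from the upstream tdx_early_init() in
arch/x86/coco/tdx/tdx.c (the rest of the init is omitted):

	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;

Only the function types change with this patch; the registration site
stays as-is.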
 
  * transition is complete, hv_vtom_set_host_visibility() marks the pages
  * as "present" again.
  */
-static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
 {
-       return !set_memory_np(kbuffer, pagecount);
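+       /* set_memory_np() returns 0 or a negative errno; pass it through. */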
+       return set_memory_np(kbuffer, pagecount);
 }
 
 /*
- * with host. This function works as wrap of hv_mark_gpa_visibility()
- * with memory base and size.
+ * with host. This function is a wrapper around hv_mark_gpa_visibility(),
+ * taking a memory base and size.
  */
-static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
 {
        enum hv_mem_host_visibility visibility = enc ?
                        VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
        u64 *pfn_array;
        phys_addr_t paddr;
+       int i, pfn, err;
        void *vaddr;
        int ret = 0;
-       bool result = true;
-       int i, pfn;
 
        pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
        if (!pfn_array) {
-               result = false;
+               ret = -ENOMEM;
                goto err_set_memory_p;
        }
 
                if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
                        ret = hv_mark_gpa_visibility(pfn, pfn_array,
                                                     visibility);
-                       if (ret) {
-                               result = false;
+                       if (ret)
                                goto err_free_pfn_array;
-                       }
                        pfn = 0;
                }
        }
         * order to avoid leaving the memory range in a "broken" state. Setting
         * the PRESENT bits shouldn't fail, but return an error if it does.
         */
-       if (set_memory_p(kbuffer, pagecount))
-               result = false;
+       err = set_memory_p(kbuffer, pagecount);
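+       /* Keep the first error: an earlier hv_mark_gpa_visibility() errno wins. */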
+       if (err && !ret)
+               ret = err;
 
-       return result;
+       return ret;
 }
 
 static bool hv_vtom_tlb_flush_required(bool private)
 
  * @enc_cache_flush_required   Returns true if a cache flush is needed before changing page encryption status
  */
 struct x86_guest {
-       bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
-       bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+       int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+       int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
        bool (*enc_tlb_flush_required)(bool enc);
        bool (*enc_cache_flush_required)(void);
 };
 
 
 static void default_nmi_init(void) { };
 
-static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
-static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
+static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
 static bool enc_tlb_flush_required_noop(bool enc) { return false; }
 static bool enc_cache_flush_required_noop(void) { return false; }
 static bool is_private_mmio_noop(u64 addr) {return false; }
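
These stubs are the defaults in the platform ops table; a sketch of the
initializer, abridged from arch/x86/kernel/x86_init.c (other fields
omitted):

	struct x86_platform_ops x86_platform __ro_after_init = {
		/* ... other fields ... */
		.guest = {
			.enc_status_change_prepare = enc_status_change_prepare_noop,
			.enc_status_change_finish  = enc_status_change_finish_noop,
			.enc_tlb_flush_required    = enc_tlb_flush_required_noop,
			.enc_cache_flush_required  = enc_cache_flush_required_noop,
		},
	};

Returning 0 from the no-ops preserves the old behavior: without memory
encryption, the conversion path proceeds as if the hypervisor call
succeeded.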
 
 #endif
 }
 
-static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
        /*
         * To maintain the security guarantees of SEV-SNP guests, make sure
        if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
                snp_set_memory_shared(vaddr, npages);
 
-       return true;
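+       /* snp_set_memory_shared() has no return value, so there is no error to propagate. */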
+       return 0;
 }
 
-/* Return true unconditionally: return value doesn't matter for the SEV side */
+/* Return 0 unconditionally: return value doesn't matter for the SEV side */
-static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
 {
        /*
         * After memory is mapped encrypted in the page table, validate it
        if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
 
-       return true;
+       return 0;
 }
 
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 
                cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
 
        /* Notify hypervisor that we are about to set/clr encryption attribute. */
-       if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
+       ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+       if (ret)
                goto vmm_fail;
 
        ret = __change_page_attr_set_clr(&cpa, 1);
                return ret;
 
        /* Notify hypervisor that we have successfully set/clr encryption attribute. */
-       if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+       ret = x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
+       if (ret)
                goto vmm_fail;
 
        return 0;
 
 vmm_fail:
-       WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
-                 (void *)addr, numpages, enc ? "private" : "shared");
+       WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s: %d\n",
+                 (void *)addr, numpages, enc ? "private" : "shared", ret);
 
-       return -EIO;
+       return ret;
 }
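
With the conversion path returning the callback's errno instead of a
blanket -EIO, the specific failure now propagates out through
set_memory_encrypted()/set_memory_decrypted(). A hypothetical caller
(vaddr, npages and ret are illustrative names):

	ret = set_memory_decrypted((unsigned long)vaddr, npages);
	if (ret)
		return ret;	/* e.g. -EIO from TDX, -ENOMEM from the Hyper-V path */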
 
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)