        if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
                return -EFAULT;
 
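+       /*
+        * Stash the guest policy so that sev_decrypt_vmsa() can later check
+        * whether debugging of the guest is permitted.
+        */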
+       sev->policy = params.policy;
+
        memset(&start, 0, sizeof(start));
 
        dh_blob = NULL;
        if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
                return -EINVAL;
 
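+       /*
+        * Stash the SNP guest policy so that sev_decrypt_vmsa() can later
+        * check its DEBUG bit before decrypting the VMSA.
+        */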
+       sev->policy = params.policy;
+
        sev->snp_context = snp_context_create(kvm, argp);
        if (!sev->snp_context)
                return -ENOTTY;
 
        return level;
 }
+
+struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb_save_area *vmsa;
+       struct kvm_sev_info *sev;
+       int error = 0;
+       int ret;
+
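+       /* Only SEV-ES and SEV-SNP guests keep their register state in a VMSA */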
+       if (!sev_es_guest(vcpu->kvm))
+               return NULL;
+
+       /*
+        * If the VMSA has not yet been encrypted, return a pointer to the
+        * current un-encrypted VMSA.
+        */
+       if (!vcpu->arch.guest_state_protected)
+               return (struct vmcb_save_area *)svm->sev_es.vmsa;
+
+       sev = to_kvm_sev_info(vcpu->kvm);
+
+       /* Check if the SEV policy allows debugging */
+       if (sev_snp_guest(vcpu->kvm)) {
+               if (!(sev->policy & SNP_POLICY_DEBUG))
+                       return NULL;
+       } else {
+               if (sev->policy & SEV_POLICY_NODBG)
+                       return NULL;
+       }
+
+       if (sev_snp_guest(vcpu->kvm)) {
+               struct sev_data_snp_dbg dbg = {0};
+
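+               /*
+                * The destination page must be in firmware state so that the
+                * PSP can write the decrypted VMSA contents into it.
+                */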
+               vmsa = snp_alloc_firmware_page(__GFP_ZERO);
+               if (!vmsa)
+                       return NULL;
+
+               dbg.gctx_paddr = __psp_pa(sev->snp_context);
+               dbg.src_addr = svm->vmcb->control.vmsa_pa;
+               dbg.dst_addr = __psp_pa(vmsa);
+
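+               /* Ask the PSP to decrypt the guest's VMSA page into the firmware page */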
+               ret = sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);
+
+               /*
+                * Return the target page to a hypervisor page no matter what.
+                * If this fails, the page can't be used, so leak it and don't
+                * try to use it.
+                */
+               if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
+                       return NULL;
+
+               if (ret) {
+                       pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
+                              ret, error, error);
+                       free_page((unsigned long)vmsa);
+
+                       return NULL;
+               }
+       } else {
+               struct sev_data_dbg dbg = {0};
+               struct page *vmsa_page;
+
+               vmsa_page = alloc_page(GFP_KERNEL);
+               if (!vmsa_page)
+                       return NULL;
+
+               vmsa = page_address(vmsa_page);
+
+               dbg.handle = sev->handle;
+               dbg.src_addr = svm->vmcb->control.vmsa_pa;
+               dbg.dst_addr = __psp_pa(vmsa);
+               dbg.len = PAGE_SIZE;
+
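+               /* Ask the PSP to decrypt PAGE_SIZE bytes of the VMSA into the scratch page */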
+               ret = sev_do_cmd(SEV_CMD_DBG_DECRYPT, &dbg, &error);
+               if (ret) {
+                       pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
+                              ret, error, error);
+                       __free_page(vmsa_page);
+
+                       return NULL;
+               }
+       }
+
+       return vmsa;
+}
+
+void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa)
+{
+       /* If the VMSA has not yet been encrypted, nothing was allocated */
+       if (!vcpu->arch.guest_state_protected || !vmsa)
+               return;
+
+       free_page((unsigned long)vmsa);
+}
 
        pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
        pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
        pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
+
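+       /*
+        * For SEV-ES and SEV-SNP guests the register state lives in the
+        * encrypted VMSA; decrypt it, policy permitting, so that real values
+        * are dumped below.
+        */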
+       if (sev_es_guest(vcpu->kvm)) {
+               save = sev_decrypt_vmsa(vcpu);
+               if (!save)
+                       goto no_vmsa;
+
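+               /* The decrypted VMSA supplies the state normally read from both save areas */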
+               save01 = save;
+       }
+
        pr_err("VMCB State Save Area:\n");
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "es:",
        pr_err("%-15s %016llx %-13s %016llx\n",
               "excp_from:", save->last_excp_from,
               "excp_to:", save->last_excp_to);
+
+no_vmsa:
+       if (sev_es_guest(vcpu->kvm))
+               sev_free_decrypted_vmsa(vcpu, save);
 }
 
 static bool svm_check_exit_valid(u64 exit_code)
 
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
+       unsigned long policy;   /* Guest policy (SEV or SNP format) */
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
 };
 
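+/*
+ * SEV policy bit 0 (NODBG) disallows debugging when set; SNP policy bit 19
+ * (DEBUG) allows debugging when set.
+ */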
+#define SEV_POLICY_NODBG       BIT_ULL(0)
+#define SNP_POLICY_DEBUG       BIT_ULL(19)
+
 struct kvm_svm {
        struct kvm kvm;
 
 int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
 int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
+void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
 #else
 static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 {
        return NULL;
 }
 
+static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
+{
+       return NULL;
+}
+static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
 #endif
 
 /* vmenter.S */