void *hv_hypercall_pg;
 EXPORT_SYMBOL_GPL(hv_hypercall_pg);
 
-void __percpu **hv_ghcb_pg;
+union hv_ghcb __percpu **hv_ghcb_pg;
 
 /* Storage to save the hypercall page temporarily for hibernation */
 static void *hv_hypercall_pg_saved;
        }
 
        if (hv_isolation_type_snp()) {
-               hv_ghcb_pg = alloc_percpu(void *);
+               hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
                if (!hv_ghcb_pg)
                        goto free_vp_assist_page;
        }
        guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 
+       /*
+        * Hyper-V requires the guest OS ID to be written via the GHCB
+        * in an SNP isolation VM.
+        */
+       hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
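+       /* hv_ghcb_msr_write() returns early if the GHCB page is unset. */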
+
        hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
                        VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
                        VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
 
 clean_guest_os_id:
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+       hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
        cpuhp_remove_state(cpuhp);
 free_ghcb_page:
        free_percpu(hv_ghcb_pg);
 
        /* Reset our OS id */
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+       hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
 
        /*
         * Reset hypercall page reference before reset the page,
        return hypercall_msr.enable;
 }
 EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
-
-enum hv_isolation_type hv_get_isolation_type(void)
-{
-       if (!(ms_hyperv.priv_high & HV_ISOLATION))
-               return HV_ISOLATION_TYPE_NONE;
-       return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
-}
-EXPORT_SYMBOL_GPL(hv_get_isolation_type);
-
-bool hv_is_isolation_supported(void)
-{
-       if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
-               return false;
-
-       if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
-               return false;
-
-       return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
-}
-
-DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
-
-bool hv_isolation_type_snp(void)
-{
-       return static_branch_unlikely(&isolation_type_snp);
-}
-EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
 
  *  Tianyu Lan <Tianyu.Lan@microsoft.com>
  */
 
 #include <linux/hyperv.h>
 #include <linux/types.h>
 #include <linux/bitfield.h>
 #include <linux/slab.h>
+#include <asm/svm.h>
+#include <asm/sev.h>
 #include <asm/io.h>
 #include <asm/mshyperv.h>
+#include <asm/hypervisor.h>
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
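+/*
+ * Wrap the GHCB so that the union spans exactly one Hyper-V page;
+ * the BUILD_BUG_ON() in hv_ghcb_msr_read() enforces this.
+ */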
+union hv_ghcb {
+       struct ghcb ghcb;
+} __packed __aligned(HV_HYP_PAGE_SIZE);
+
+void hv_ghcb_msr_write(u64 msr, u64 value)
+{
+       union hv_ghcb *hv_ghcb;
+       void **ghcb_base;
+       unsigned long flags;
+       struct es_em_ctxt ctxt;
+
+       if (!hv_ghcb_pg)
+               return;
+
+       WARN_ON(in_nmi());
+
+       local_irq_save(flags);
+       ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
+       hv_ghcb = (union hv_ghcb *)*ghcb_base;
+       if (!hv_ghcb) {
+               local_irq_restore(flags);
+               return;
+       }
+
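+       /*
+        * GHCB MSR protocol: the MSR index goes in RCX and the value
+        * in RAX (low 32 bits) and RDX (high 32 bits), mirroring the
+        * WRMSR register convention; exit info 1 selects a write.
+        */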
+       ghcb_set_rcx(&hv_ghcb->ghcb, msr);
+       ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
+       ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
+
+       if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
+                               SVM_EXIT_MSR, 1, 0))
+               pr_warn("Failed to write MSR %llx via GHCB.\n", msr);
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
+
+void hv_ghcb_msr_read(u64 msr, u64 *value)
+{
+       union hv_ghcb *hv_ghcb;
+       void **ghcb_base;
+       unsigned long flags;
+       struct es_em_ctxt ctxt;
+
+       /* Ensure that union hv_ghcb spans exactly one Hyper-V page. */
+       BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
+
+       if (!hv_ghcb_pg)
+               return;
+
+       WARN_ON(in_nmi());
+
+       local_irq_save(flags);
+       ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
+       hv_ghcb = (union hv_ghcb *)*ghcb_base;
+       if (!hv_ghcb) {
+               local_irq_restore(flags);
+               return;
+       }
+
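+       /*
+        * Exit info 1 of zero requests a read; on success the
+        * hypervisor returns the value in RAX (low 32 bits) and
+        * RDX (high 32 bits), mirroring RDMSR.
+        */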
+       ghcb_set_rcx(&hv_ghcb->ghcb, msr);
+       if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
+                               SVM_EXIT_MSR, 0, 0))
+               pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
+       else
+               *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
+                       | ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
+#endif
+
+enum hv_isolation_type hv_get_isolation_type(void)
+{
+       if (!(ms_hyperv.priv_high & HV_ISOLATION))
+               return HV_ISOLATION_TYPE_NONE;
+       return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
+}
+EXPORT_SYMBOL_GPL(hv_get_isolation_type);
+
+/*
+ * hv_is_isolation_supported - Check whether the system runs in a
+ * Hyper-V isolation VM.
+ */
+bool hv_is_isolation_supported(void)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+               return false;
+
+       if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
+               return false;
+
+       return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
+}
+
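+/* Enabled at boot when Hyper-V reports an SNP isolation VM. */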
+DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
+
+/*
+ * hv_isolation_type_snp - Check whether the system runs in an AMD
+ * SEV-SNP based Hyper-V isolation VM.
+ */
+bool hv_isolation_type_snp(void)
+{
+       return static_branch_unlikely(&isolation_type_snp);
+}
 
 /*
  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 
 #include <asm/paravirt.h>
 #include <asm/mshyperv.h>
 
+union hv_ghcb;
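+/*
+ * Opaque in this header; the full layout lives with the
+ * CONFIG_AMD_MEM_ENCRYPT code.
+ */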
+
 DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
 
 typedef int (*hyperv_fill_flush_list_func)(
                struct hv_guest_mapping_flush_list *flush,
                void *data);
 
-static inline void hv_set_register(unsigned int reg, u64 value)
-{
-       wrmsrl(reg, value);
-}
-
-static inline u64 hv_get_register(unsigned int reg)
-{
-       u64 value;
-
-       rdmsrl(reg, value);
-       return value;
-}
-
 #define hv_get_raw_timer() rdtsc_ordered()
 
 void hyperv_vector_handler(struct pt_regs *regs);
 
 extern u64 hv_current_partition_id;
 
-extern void __percpu **hv_ghcb_pg;
+extern union hv_ghcb __percpu **hv_ghcb_pg;
 
 int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
 int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
                struct hv_interrupt_entry *entry);
 int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
 int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible);
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+void hv_ghcb_msr_write(u64 msr, u64 value);
+void hv_ghcb_msr_read(u64 msr, u64 *value);
+#else
+static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
+static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
+#endif
+
+extern bool hv_isolation_type_snp(void);
+
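+/*
+ * SynIC registers occupy the MSR range HV_REGISTER_SCONTROL through
+ * HV_REGISTER_SINT15; in an SNP isolation VM they are accessed via
+ * the GHCB (see hv_get_register() and hv_set_register() below).
+ */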
+static inline bool hv_is_synic_reg(unsigned int reg)
+{
+       if ((reg >= HV_REGISTER_SCONTROL) &&
+           (reg <= HV_REGISTER_SINT15))
+               return true;
+       return false;
+}
+
+static inline u64 hv_get_register(unsigned int reg)
+{
+       u64 value;
+
+       if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
+               hv_ghcb_msr_read(reg, &value);
+       else
+               rdmsrl(reg, value);
+       return value;
+}
+
+static inline void hv_set_register(unsigned int reg, u64 value)
+{
+       if (hv_is_synic_reg(reg) && hv_isolation_type_snp()) {
+               hv_ghcb_msr_write(reg, value);
+
+               /*
+                * Write the SINT proxy bit (bit 20) via a plain
+                * wrmsrl() instruction.
+                */
+               if (reg >= HV_REGISTER_SINT0 &&
+                   reg <= HV_REGISTER_SINT15)
+                       wrmsrl(reg, value | (1 << 20));
+       } else {
+               wrmsrl(reg, value);
+       }
+}
+
 #else /* CONFIG_HYPERV */
 static inline void hyperv_init(void) {}
 static inline void hyperv_setup_mmu_ops(void) {}
 {
        return -1;
 }
+static inline void hv_set_register(unsigned int reg, u64 value) { }
+static inline u64 hv_get_register(unsigned int reg) { return 0; }
 static inline int hv_set_mem_host_visibility(unsigned long addr, int numpages,
                                             bool visible)
 {
 
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
                tasklet_init(&hv_cpu->msg_dpc,
                             vmbus_on_msg_dpc, (unsigned long) hv_cpu);
 
-               hv_cpu->synic_message_page =
-                       (void *)get_zeroed_page(GFP_ATOMIC);
-               if (hv_cpu->synic_message_page == NULL) {
-                       pr_err("Unable to allocate SYNIC message page\n");
-                       goto err;
-               }
+               /*
+                * SynIC message and event pages are allocated by the
+                * paravisor in an SNP isolation VM. Skip allocating
+                * these pages here.
+                */
+               if (!hv_isolation_type_snp()) {
+                       hv_cpu->synic_message_page =
+                               (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (hv_cpu->synic_message_page == NULL) {
+                               pr_err("Unable to allocate SYNIC message page\n");
+                               goto err;
+                       }
 
-               hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
-               if (hv_cpu->synic_event_page == NULL) {
-                       pr_err("Unable to allocate SYNIC event page\n");
-                       goto err;
+                       hv_cpu->synic_event_page =
+                               (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (hv_cpu->synic_event_page == NULL) {
+                               pr_err("Unable to allocate SYNIC event page\n");
+                               goto err;
+                       }
                }
 
                hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
        /* Setup the Synic's message page */
        simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
        simp.simp_enabled = 1;
-       simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
-               >> HV_HYP_PAGE_SHIFT;
+
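+       /*
+        * In an SNP isolation VM the SIMP page is allocated by the
+        * paravisor, which has already programmed base_simp_gpa;
+        * map that page rather than allocating a new one.
+        */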
+       if (hv_isolation_type_snp()) {
+               hv_cpu->synic_message_page
+                       = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+                                  HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+               if (!hv_cpu->synic_message_page)
+                       pr_err("Failed to map SynIC message page.\n");
+       } else {
+               simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+                       >> HV_HYP_PAGE_SHIFT;
+       }
 
        hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
 
        /* Setup the Synic's event page */
        siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 1;
-       siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
-               >> HV_HYP_PAGE_SHIFT;
+
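+       /* Likewise, map the paravisor-provided SIEFP page in an SNP VM. */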
+       if (hv_isolation_type_snp()) {
+               hv_cpu->synic_event_page =
+                       memremap(siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT,
+                                HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+
+               if (!hv_cpu->synic_event_page)
+                       pr_err("Failed to map SynIC event page.\n");
+       } else {
+               siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+                       >> HV_HYP_PAGE_SHIFT;
+       }
 
        hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 
  */
 void hv_synic_disable_regs(unsigned int cpu)
 {
+       struct hv_per_cpu_context *hv_cpu
+               = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
                                shared_sint.as_uint64);
 
        simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+       /*
+        * In an isolation VM, the SIMP and SIEFP pages are allocated
+        * by the paravisor and will also be used by the kdump kernel,
+        * so only reset the enable bits here and keep the page
+        * addresses.
+        */
        simp.simp_enabled = 0;
-       simp.base_simp_gpa = 0;
+       if (hv_isolation_type_snp())
+               memunmap(hv_cpu->synic_message_page);
+       else
+               simp.base_simp_gpa = 0;
 
        hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
 
        siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 0;
-       siefp.base_siefp_gpa = 0;
+
+       if (hv_isolation_type_snp())
+               memunmap(hv_cpu->synic_event_page);
+       else
+               siefp.base_siefp_gpa = 0;
 
        hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 
 
 }
 EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
 
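+/*
+ * Weak stub for architectures without SNP isolation support; x86
+ * overrides it with a static-key based implementation.
+ */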
+bool __weak hv_isolation_type_snp(void)
+{
+       return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+
 void __weak hv_setup_vmbus_handler(void (*handler)(void))
 {
 }
 
 
 extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
 extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+extern bool hv_isolation_type_snp(void);
 
 /* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
 static inline int hv_result(u64 status)