before loading.
                        See Documentation/admin-guide/blockdev/ramdisk.rst.
 
+       prot_virt=      [S390] Enable hosting protected virtual machines
+                       isolated from the hypervisor (if the hardware
+                       supports it).
+                       Format: <bool>
+
        psi=            [KNL] Enable or disable pressure stall information
                        tracking.
                        Format: <bool>
 
 obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
 obj-y  += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
 obj-y  += version.o pgm_check_info.o ctype.o text_dma.o
-obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST)   += uv.o
+# uv.o is needed by the protected-guest support and by the KVM host side
+# (CONFIG_PGSTE); build it if either option is set to y
+obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))  += uv.o
 obj-$(CONFIG_RELOCATABLE)      += machine_kexec_reloc.o
 obj-$(CONFIG_RANDOMIZE_BASE)   += kaslr.o
 targets        := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
 
 #include <asm/facility.h>
 #include <asm/sections.h>
 
+/* will be used in arch/s390/kernel/uv.c */
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 int __bootdata_preserved(prot_virt_guest);
+#endif
+/* ultravisor query results, preserved across decompression for the host side */
+#if IS_ENABLED(CONFIG_KVM)
+struct uv_info __bootdata_preserved(uv_info);
+#endif
 
 void uv_query_info(void)
 {
        if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
                return;
 
+       /* Copy the query results into uv_info for the KVM host code */
+       if (IS_ENABLED(CONFIG_KVM)) {
+               memcpy(uv_info.inst_calls_list, uvcb.inst_calls_list, sizeof(uv_info.inst_calls_list));
+               uv_info.uv_base_stor_len = uvcb.uv_base_stor_len;
+               uv_info.guest_base_stor_len = uvcb.conf_base_phys_stor_len;
+               uv_info.guest_virt_base_stor_len = uvcb.conf_base_virt_stor_len;
+               uv_info.guest_virt_var_stor_len = uvcb.conf_virt_var_stor_len;
+               uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
+               uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
+               uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
+               uv_info.max_guest_cpus = uvcb.max_guest_cpus;
+       }
+
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+       /* we are a protected guest when both share/unshare UV calls exist */
        if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
            test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
                prot_virt_guest = 1;
+#endif
 }
 
        struct uv_cb_header header;
        u64 reserved08;
        u64 inst_calls_list[4];
-       u64 reserved30[15];
+       u64 reserved30[2];
+       u64 uv_base_stor_len;
+       u64 reserved48;
+       u64 conf_base_phys_stor_len;
+       u64 conf_base_virt_stor_len;
+       u64 conf_virt_var_stor_len;
+       u64 cpu_stor_len;
+       u32 reserved70[3];
+       u32 max_num_sec_conf;
+       u64 max_guest_stor_addr;
+       u8  reserved88[158 - 136];
+       u16 max_guest_cpus;
+       u8  reserveda0[200 - 160];
 } __packed __aligned(8);
 
 struct uv_cb_share {
        return cc;
 }
 
+/*
+ * Ultravisor query results, filled in by uv_query_info() in the boot
+ * code and consumed by the KVM host support.
+ */
+struct uv_info {
+       unsigned long inst_calls_list[4];       /* bitmap of available UV calls */
+       unsigned long uv_base_stor_len;
+       unsigned long guest_base_stor_len;
+       unsigned long guest_virt_base_stor_len;
+       unsigned long guest_virt_var_stor_len;
+       unsigned long guest_cpu_stor_len;
+       unsigned long max_sec_stor_addr;
+       unsigned int max_num_sec_conf;
+       unsigned short max_guest_cpus;
+};
+
+extern struct uv_info uv_info;
+
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 extern int prot_virt_guest;
 
        return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
 }
 
-void uv_query_info(void);
 #else
 #define is_prot_virt_guest() 0
 static inline int uv_set_shared(unsigned long addr) { return 0; }
 static inline int uv_remove_shared(unsigned long addr) { return 0; }
+#endif
+
+#if IS_ENABLED(CONFIG_KVM)
+extern int prot_virt_host;
+
+/* nonzero when this kernel is set up to host protected virtual machines */
+static inline int is_prot_virt_host(void)
+{
+       return prot_virt_host;
+}
+#else
+#define is_prot_virt_host() 0
+#endif
+
+/* uv_query_info() is built whenever guest or host protvirt support is on */
+#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
+void uv_query_info(void);
+#else
 static inline void uv_query_info(void) {}
 #endif
 
 
 obj-$(CONFIG_PERF_EVENTS)      += perf_cpum_cf_diag.o
 
 obj-$(CONFIG_TRACEPOINTS)      += trace.o
+# uv.o is needed for protected-guest support and for the KVM host (PGSTE)
+obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))  += uv.o
 
 # vdso
 obj-y                          += vdso64/
 
 
 unsigned long int_hwcap = 0;
 
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
-int __bootdata_preserved(prot_virt_guest);
-#endif
-
 int __bootdata(noexec_disabled);
 int __bootdata(memory_end_set);
 unsigned long __bootdata(memory_end);
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Ultravisor functions and initialization
+ *
+ * Copyright IBM Corp. 2019, 2020
+ */
+#define KMSG_COMPONENT "prot_virt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/bitmap.h>
+#include <linux/memblock.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+#include <asm/uv.h>
+
+/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+int __bootdata_preserved(prot_virt_guest);
+#endif
+
+#if IS_ENABLED(CONFIG_KVM)
+/* set via the "prot_virt" command line parameter, see prot_virt_setup() */
+int prot_virt_host;
+EXPORT_SYMBOL(prot_virt_host);
+struct uv_info __bootdata_preserved(uv_info);
+EXPORT_SYMBOL(uv_info);
+
+/*
+ * Handler for the "prot_virt" early command line parameter.
+ *
+ * "prot_virt=1" requests hosting protected virtual machines.  The
+ * request is denied (with a warning) when this kernel itself runs as a
+ * protected guest, or when the hardware lacks the ultravisor-call
+ * facility (158).
+ *
+ * Returns the kstrtobool() result, so a malformed value is reported by
+ * the early-param machinery.
+ */
+static int __init prot_virt_setup(char *val)
+{
+       bool enabled;
+       int rc;
+
+       rc = kstrtobool(val, &enabled);
+       if (!rc && enabled)
+               prot_virt_host = 1;
+
+       if (is_prot_virt_guest() && prot_virt_host) {
+               prot_virt_host = 0;
+               pr_warn("Protected virtualization not available in protected guests.\n");
+       }
+
+       if (prot_virt_host && !test_facility(158)) {
+               prot_virt_host = 0;
+               pr_warn("Protected virtualization not supported by the hardware.\n");
+       }
+
+       return rc;
+}
+early_param("prot_virt", prot_virt_setup);
+#endif