#define _ASM_ARM_PARAVIRT_H
 
 #ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
 struct static_key;
 extern struct static_key paravirt_steal_enabled;
 extern struct static_key paravirt_steal_rq_enabled;
 
-struct pv_time_ops {
-       unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
-       struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
 
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 
+/* Return stolen time for @cpu via the pv_steal_clock static call. */
 static inline u64 paravirt_steal_clock(int cpu)
 {
-       return pv_ops.time.steal_clock(cpu);
+       return static_call(pv_steal_clock)(cpu);
 }
 #endif
 
 
 #include <linux/export.h>
 #include <linux/jump_label.h>
 #include <linux/types.h>
+#include <linux/static_call.h>
 #include <asm/paravirt.h>
 
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
-EXPORT_SYMBOL_GPL(pv_ops);
+/*
+ * Default steal-clock implementation: no hypervisor has registered a
+ * handler yet, so report zero stolen time for every cpu.
+ */
+static u64 native_steal_clock(int cpu)
+{
+       return 0;
+}
+
+/* Hypervisor guests retarget this key via static_call_update(). */
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
 #define _ASM_ARM64_PARAVIRT_H
 
 #ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
 struct static_key;
 extern struct static_key paravirt_steal_enabled;
 extern struct static_key paravirt_steal_rq_enabled;
 
-struct pv_time_ops {
-       unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
-       struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
 
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 
+/* Return stolen time for @cpu via the pv_steal_clock static call. */
 static inline u64 paravirt_steal_clock(int cpu)
 {
-       return pv_ops.time.steal_clock(cpu);
+       return static_call(pv_steal_clock)(cpu);
 }
 
 int __init pv_time_init(void);
 
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/static_call.h>
 
 #include <asm/paravirt.h>
 #include <asm/pvclock-abi.h>
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
-EXPORT_SYMBOL_GPL(pv_ops);
+/*
+ * Default steal-clock implementation: report zero stolen time until a
+ * hypervisor retargets the static call (see static_call_update() in the
+ * stolen-time init path below).
+ */
+static u64 native_steal_clock(int cpu)
+{
+       return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
 struct pv_time_stolen_time_region {
        struct pvclock_vcpu_stolen_time *kaddr;
 early_param("no-steal-acc", parse_no_stealacc);
 
 /* return stolen time in ns by asking the hypervisor */
-static u64 pv_steal_clock(int cpu)
+static u64 para_steal_clock(int cpu)
 {
        struct pv_time_stolen_time_region *reg;
 
        if (ret)
                return ret;
 
-       pv_ops.time.steal_clock = pv_steal_clock;
+       static_call_update(pv_steal_clock, para_steal_clock);
 
        static_key_slow_inc(&paravirt_steal_enabled);
        if (steal_acc)
 
 
 config PARAVIRT
        bool "Enable paravirtualization code"
+       depends on HAVE_STATIC_CALL
        help
          This changes the kernel so it can modify itself when it is run
          under a hypervisor, potentially improving performance significantly
 
 static __always_inline void hv_setup_sched_clock(void *sched_clock)
 {
 #ifdef CONFIG_PARAVIRT
-       pv_ops.time.sched_clock = sched_clock;
+       paravirt_set_sched_clock(sched_clock);
 #endif
 }
 
 
 #include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
+#include <linux/static_call_types.h>
 #include <asm/frame.h>
 
-static inline unsigned long long paravirt_sched_clock(void)
+/* The dummy_* prototypes exist only so DECLARE_STATIC_CALL() can
+ * type-check the call keys; they are not meant to be defined here. */
+u64 dummy_steal_clock(int cpu);
+u64 dummy_sched_clock(void);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
+
+/* Retarget pv_sched_clock to a hypervisor-provided clock. */
+void paravirt_set_sched_clock(u64 (*func)(void));
+
+static inline u64 paravirt_sched_clock(void)
 {
-       return PVOP_CALL0(unsigned long long, time.sched_clock);
+       return static_call(pv_sched_clock)();
 }
 
 struct static_key;
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
-       return PVOP_CALL1(u64, time.steal_clock, cpu);
+       return static_call(pv_steal_clock)(cpu);
 }
 
 /* The paravirtualized I/O functions */
 
 } __no_randomize_layout;
 #endif
 
-struct pv_time_ops {
-       unsigned long long (*sched_clock)(void);
-       unsigned long long (*steal_clock)(int cpu);
-} __no_randomize_layout;
-
 struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        void (*io_delay)(void);
  * what to patch. */
 struct paravirt_patch_template {
        struct pv_init_ops      init;
-       struct pv_time_ops      time;
        struct pv_cpu_ops       cpu;
        struct pv_irq_ops       irq;
        struct pv_mmu_ops       mmu;
 
 #include <linux/clocksource.h>
 #include <linux/cpu.h>
 #include <linux/reboot.h>
+#include <linux/static_call.h>
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
        vmware_cyc2ns_setup();
 
        if (vmw_sched_clock)
-               pv_ops.time.sched_clock = vmware_sched_clock;
+               paravirt_set_sched_clock(vmware_sched_clock);
 
        if (vmware_is_stealclock_available()) {
                has_steal_clock = true;
-               pv_ops.time.steal_clock = vmware_steal_clock;
+               static_call_update(pv_steal_clock, vmware_steal_clock);
 
                /* We use reboot notifier only to disable steal clock */
                register_reboot_notifier(&vmware_pv_reboot_nb);
 
 
        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
-               pv_ops.time.steal_clock = kvm_steal_clock;
+               static_call_update(pv_steal_clock, kvm_steal_clock);
        }
 
        if (pv_tlb_flush_supported()) {
 
        if (!stable)
                clear_sched_clock_stable();
        kvm_sched_clock_offset = kvm_clock_read();
-       pv_ops.time.sched_clock = kvm_sched_clock_read;
+       paravirt_set_sched_clock(kvm_sched_clock_read);
 
        pr_info("kvm-clock: using sched offset of %llu cycles",
                kvm_sched_clock_offset);
 
 #include <linux/highmem.h>
 #include <linux/kprobes.h>
 #include <linux/pgtable.h>
+#include <linux/static_call.h>
 
 #include <asm/bug.h>
 #include <asm/paravirt.h>
        return 0;
 }
 
+/* Defaults: zero stolen time and the native sched_clock implementation. */
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
+
+/*
+ * Retarget the pv_sched_clock static call to a hypervisor clock
+ * (e.g. kvm_sched_clock_read, vmware_sched_clock, xen_sched_clock).
+ */
+void paravirt_set_sched_clock(u64 (*func)(void))
+{
+       static_call_update(pv_sched_clock, func);
+}
+
 /* These are in entry.S */
 extern void native_iret(void);
 
        /* Init ops. */
        .init.patch             = native_patch,
 
-       /* Time ops. */
-       .time.sched_clock       = native_sched_clock,
-       .time.steal_clock       = native_steal_clock,
-
        /* Cpu ops. */
        .cpu.io_delay           = native_io_delay,
 
 
 #include <linux/percpu.h>
 #include <linux/timex.h>
 #include <linux/static_key.h>
+#include <linux/static_call.h>
 
 #include <asm/hpet.h>
 #include <asm/timer.h>
 
+/* True while pv_sched_clock still points at native_sched_clock. */
 bool using_native_sched_clock(void)
 {
-       return pv_ops.time.sched_clock == native_sched_clock;
+       /* static_call_query() reads the current target of the key. */
+       return static_call_query(pv_sched_clock) == native_sched_clock;
 }
 #else
 unsigned long long
 
        }
 }
 
-static const struct pv_time_ops xen_time_ops __initconst = {
-       .sched_clock = xen_sched_clock,
-       .steal_clock = xen_steal_clock,
-};
-
 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
 static u64 xen_clock_value_saved;
 
                pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 }
 
-void __init xen_init_time_ops(void)
+/*
+ * Time setup shared by both Xen init paths below: register Xen's steal
+ * clock and sched clock, and take over TSC calibration and the wallclock.
+ */
+static void __init xen_init_time_common(void)
 {
        xen_sched_clock_offset = xen_clocksource_read();
-       pv_ops.time = xen_time_ops;
+       static_call_update(pv_steal_clock, xen_steal_clock);
+       paravirt_set_sched_clock(xen_sched_clock);
+
+       x86_platform.calibrate_tsc = xen_tsc_khz;
+       x86_platform.get_wallclock = xen_get_wallclock;
+}
+
+void __init xen_init_time_ops(void)
+{
+       xen_init_time_common();
 
        x86_init.timers.timer_init = xen_time_init;
        x86_init.timers.setup_percpu_clockev = x86_init_noop;
        x86_cpuinit.setup_percpu_clockev = x86_init_noop;
 
-       x86_platform.calibrate_tsc = xen_tsc_khz;
-       x86_platform.get_wallclock = xen_get_wallclock;
        /* Dom0 uses the native method to set the hardware RTC. */
        if (!xen_initial_domain())
                x86_platform.set_wallclock = xen_set_wallclock;
                return;
        }
 
-       xen_sched_clock_offset = xen_clocksource_read();
-       pv_ops.time = xen_time_ops;
+       xen_init_time_common();
+
        x86_init.timers.setup_percpu_clockev = xen_time_init;
        x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
 
-       x86_platform.calibrate_tsc = xen_tsc_khz;
-       x86_platform.get_wallclock = xen_get_wallclock;
        x86_platform.set_wallclock = xen_set_wallclock;
 }
 #endif
 
 #include <linux/math64.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/static_call.h>
 
 #include <asm/paravirt.h>
 #include <asm/xen/hypervisor.h>
        xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                        VMASST_TYPE_runstate_update_flag);
 
-       pv_ops.time.steal_clock = xen_steal_clock;
+       static_call_update(pv_steal_clock, xen_steal_clock);
 
        static_key_slow_inc(&paravirt_steal_enabled);
        if (xen_runstate_remote)