/*
 * gic_write_bpr1() - write @val to the ICC_BPR1 system register.
 *
 * Uses the generic write_sysreg() accessor instead of open-coded
 * inline asm, so the register encoding and barrier semantics live
 * in one place.  (Presumably ICC_BPR1 is the GIC CPU-interface
 * Binary Point Register for group-1 interrupts — confirm against
 * the GIC architecture spec.)
 */
static inline void gic_write_bpr1(u32 val)
{
	write_sysreg(val, ICC_BPR1);
}
  
  /*
 
                if (check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
-                       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+                       kvm_vcpu_write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
                }
 +              if (kvm_lapic_hv_timer_in_use(vcpu) &&
 +                              kvm_x86_ops->set_hv_timer(vcpu,
 +                                      kvm_get_lapic_tscdeadline_msr(vcpu)))
 +                      kvm_lapic_switch_to_sw_timer(vcpu);
                /*
                 * On a host with synchronized TSC, there is no need to update
                 * kvmclock on vcpu->cpu migration
 
  #ifdef CONFIG_IRQ_REMAP
        struct irq_domain *ir_domain;
        struct irq_domain *msi_domain;
+ 
+       struct amd_irte_ops *irte_ops;
  #endif
 +
 +      volatile u64 __aligned(8) cmd_sem;
  };
  
/* Presumably the buffer length for an ACPI HID device _UID string — verify against users. */
#define ACPIHID_UID_LEN 256