{
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (svm_nested_virtualize_tpr(vcpu))
+       if (nested_svm_virtualize_tpr(vcpu))
                return;
 
        clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
@@ ... @@
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (svm_nested_virtualize_tpr(vcpu))
+       if (nested_svm_virtualize_tpr(vcpu))
                return;
 
        if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
@@ ... @@
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr8;
 
-       if (svm_nested_virtualize_tpr(vcpu) ||
+       if (nested_svm_virtualize_tpr(vcpu) ||
            kvm_vcpu_apicv_active(vcpu))
                return;
 
 
@@ ... @@
 #define NESTED_EXIT_DONE       1       /* Exit caused nested vmexit  */
 #define NESTED_EXIT_CONTINUE   2       /* Further checks needed      */
 
-static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
+static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
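
For reference, a sketch of how the renamed helper plausibly reads in full.
The hunk above shows only its opening statement; the return expression below
(testing V_INTR_MASKING in the nested int_ctl) is an assumption about the
surrounding svm.h code and may differ by kernel version, it is not something
this diff changes:

	/*
	 * Hypothetical completion of the helper renamed above. It would
	 * return true when the vCPU is running a nested guest and L1 has
	 * enabled V_INTR_MASKING, i.e. L1 virtualizes the TPR itself,
	 * which is why the svm.c hunks above bail out early instead of
	 * touching the CR8 intercepts.
	 */
	static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		return is_guest_mode(vcpu) &&
		       (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
	}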