return 0;
 }
 
-u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
        return vmcb->control.tsc_offset +
 
  * Like guest_read_tsc, but always returns L1's notion of the timestamp
  * counter, even if a nested guest (L2) is currently running.
  */
-u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        u64 tsc_offset;
 
        /* Unpin physical memory we referred to in current vmcs02 */
        if (vmx->nested.apic_access_page) {
                nested_release_page(vmx->nested.apic_access_page);
-               vmx->nested.apic_access_page = 0;
+               vmx->nested.apic_access_page = NULL;
        }
        if (vmx->nested.virtual_apic_page) {
                nested_release_page(vmx->nested.virtual_apic_page);
-               vmx->nested.virtual_apic_page = 0;
+               vmx->nested.virtual_apic_page = NULL;
        }
 
        nested_free_all_saved_vmcss(vmx);
        /* Unpin physical memory we referred to in vmcs02 */
        if (vmx->nested.apic_access_page) {
                nested_release_page(vmx->nested.apic_access_page);
-               vmx->nested.apic_access_page = 0;
+               vmx->nested.apic_access_page = NULL;
        }
        if (vmx->nested.virtual_apic_page) {
                nested_release_page(vmx->nested.virtual_apic_page);
-               vmx->nested.virtual_apic_page = 0;
+               vmx->nested.virtual_apic_page = NULL;
        }
 
        /*
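
[Note, not part of the patch: the "= 0" to "= NULL" changes are behaviorally identical, but they matter for static analysis; sparse warns about "Using plain integer as NULL pointer", and NULL makes the pointer reset explicit. A toy, self-contained illustration of the pattern follows; the demo_* struct and field names are placeholders, not the kernel's.]

/*
 * Toy example, not the kernel code: resetting pointer members with NULL
 * rather than 0 silences sparse and documents the intent.
 */
#include <stddef.h>

struct demo_nested {
	void *apic_access_page;
	void *virtual_apic_page;
};

static void demo_release_pages(struct demo_nested *nested)
{
	/* 0 would also compile here, but NULL makes the pointer reset unambiguous */
	nested->apic_access_page = NULL;
	nested->virtual_apic_page = NULL;
}

int main(void)
{
	int page;
	struct demo_nested n = { &page, &page };

	demo_release_pages(&n);
	return n.apic_access_page == NULL ? 0 : 1;
}
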
        return X86EMUL_CONTINUE;
 }
 
-void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
+static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
        if (ple_gap)
                shrink_ple_window(vcpu);