avic_deactivate_vmcb(svm);
 }
 
-static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
-                                      unsigned int index)
-{
-       u64 *avic_physical_id_table;
-       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
-
-       if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) ||
-           (index > X2AVIC_MAX_PHYSICAL_ID))
-               return NULL;
-
-       avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
-
-       return &avic_physical_id_table[index];
-}
-
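Note: the deleted helper's runtime range check is subsumed by the compile-time assertion added in avic_init_backing_page() below. Both table formats must fit in the single physical ID table page, so any vCPU ID that survives the too-big-ID inhibit check at the top of the function indexes the table in bounds. A standalone sketch of that size arithmetic, with the constant values assumed from arch/x86/include/asm/svm.h (0xFF for AVIC, 0x1FF for x2AVIC):

/*
 * Sketch only; assumes AVIC_MAX_PHYSICAL_ID == 0xFF and
 * X2AVIC_MAX_PHYSICAL_ID == 0x1FF, with 4KiB pages and u64 entries:
 *
 *   (0xFF  + 1) * 8 = 2048 <= 4096
 *   (0x1FF + 1) * 8 = 4096 <= 4096
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE	4096
#define AVIC_MAX_PHYSICAL_ID	0xFFULL
#define X2AVIC_MAX_PHYSICAL_ID	0x1FFULL

static_assert((AVIC_MAX_PHYSICAL_ID + 1) * sizeof(uint64_t) <= SKETCH_PAGE_SIZE,
	      "xAPIC-format table must fit in one page");
static_assert((X2AVIC_MAX_PHYSICAL_ID + 1) * sizeof(uint64_t) <= SKETCH_PAGE_SIZE,
	      "x2APIC-format table must fit in one page");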
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 {
-       u64 *entry, new_entry;
-       int id = vcpu->vcpu_id;
+       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        struct vcpu_svm *svm = to_svm(vcpu);
+       u32 id = vcpu->vcpu_id;
+       u64 *table, new_entry;
 
        /*
         * Inhibit AVIC if the vCPU ID is bigger than what is supported by AVIC
                return 0;
        }
 
+       BUILD_BUG_ON((AVIC_MAX_PHYSICAL_ID + 1) * sizeof(*table) > PAGE_SIZE ||
+                    (X2AVIC_MAX_PHYSICAL_ID + 1) * sizeof(*table) > PAGE_SIZE);
+
        if (WARN_ON_ONCE(!vcpu->arch.apic->regs))
                return -EINVAL;
 
        }
 
        /* Setting AVIC backing page address in the phy APIC ID table */
-       entry = avic_get_physical_id_entry(vcpu, id);
-       if (!entry)
-               return -EINVAL;
+       table = page_address(kvm_svm->avic_physical_id_table_page);
 
        /* Note, fls64() returns the bit position, +1. */
        BUILD_BUG_ON(__PHYSICAL_MASK_SHIFT >
                     fls64(AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK));
 
        new_entry = avic_get_backing_page_address(svm) |
                    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
-       WRITE_ONCE(*entry, new_entry);
+       WRITE_ONCE(table[id], new_entry);
 
-       svm->avic_physical_id_cache = entry;
+       svm->avic_physical_id_cache = &table[id];
 
        return 0;
 }
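The table entry is published with WRITE_ONCE() because vCPU load/put paths update it locklessly through the cached pointer. A rough consumer-side sketch, loosely modeled on what avic_vcpu_load() does with the cache; the helper name is hypothetical and the field updates are illustrative, not the exact upstream code:

/* Hypothetical helper: retarget the entry at the current pCPU. */
static void avic_set_running_cpu(struct vcpu_svm *svm, u64 h_physical_id)
{
	u64 entry = READ_ONCE(*svm->avic_physical_id_cache);

	/* Swap in the new host CPU and mark the vCPU as running. */
	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	WRITE_ONCE(*svm->avic_physical_id_cache, entry);
}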
        if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
                return;
 
+       if (WARN_ON_ONCE(!svm->avic_physical_id_cache))
+               return;
+
        /*
         * No need to update anything if the vCPU is blocking, i.e. if the vCPU
         * is being scheduled in after being preempted.  The CPU entries in the
 
        lockdep_assert_preemption_disabled();
 
+       if (WARN_ON_ONCE(!svm->avic_physical_id_cache))
+               return;
+
        /*
         * Note, reading the Physical ID entry outside of ir_list_lock is safe
         * as only the pCPU that has loaded (or is loading) the vCPU is allowed
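Both new WARN_ON_ONCE() guards cover the case where AVIC was inhibited before avic_init_backing_page() populated the cache: per the (elided) check at the top of that function, a too-big vCPU ID inhibits AVIC and returns success without ever writing the table entry or the cached pointer. A hedged sketch of that early-out, assuming it uses the standard inhibit API; the exact call is not shown in the hunk above:

	/* Sketch of the early-out that leaves avic_physical_id_cache NULL. */
	if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
	    (id > X2AVIC_MAX_PHYSICAL_ID)) {
		kvm_set_apicv_inhibit(vcpu->kvm,
				      APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG);
		return 0;	/* success; load/put must tolerate a NULL cache */
	}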