return ret;
 }
 
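+/*
+ * With GITS_TYPER.PTA==0, a collection's target_addr holds a vcpu_id,
+ * so resolve it with kvm_get_vcpu_by_id() rather than treating it as
+ * a vcpu index.
+ */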
+static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
+                                          struct its_collection *col)
+{
+       return kvm_get_vcpu_by_id(kvm, col->target_addr);
+}
+
 /*
  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  * is targeting) to the VGIC's view, which deals with target VCPUs.
        if (!its_is_collection_mapped(ite->collection))
                return;
 
-       vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+       vcpu = collection_to_vcpu(kvm, ite->collection);
        update_affinity(ite->irq, vcpu);
 }
 
        if (!ite || !its_is_collection_mapped(ite->collection))
                return E_ITS_INT_UNMAPPED_INTERRUPT;
 
-       vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+       vcpu = collection_to_vcpu(kvm, ite->collection);
        if (!vcpu)
                return E_ITS_INT_UNMAPPED_INTERRUPT;
 
                return E_ITS_MOVI_UNMAPPED_COLLECTION;
 
        ite->collection = collection;
-       vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+       vcpu = collection_to_vcpu(kvm, collection);
 
        vgic_its_invalidate_cache(kvm);
 
        }
 
        if (its_is_collection_mapped(collection))
-               vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+               vcpu = collection_to_vcpu(kvm, collection);
 
        irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
        if (IS_ERR(irq)) {
                                    u64 *its_cmd)
 {
        u16 coll_id;
-       u32 target_addr;
        struct its_collection *collection;
        bool valid;
 
        valid = its_cmd_get_validbit(its_cmd);
        coll_id = its_cmd_get_collection(its_cmd);
-       target_addr = its_cmd_get_target_addr(its_cmd);
-
-       if (target_addr >= atomic_read(&kvm->online_vcpus))
-               return E_ITS_MAPC_PROCNUM_OOR;
 
        if (!valid) {
                vgic_its_free_collection(its, coll_id);
                vgic_its_invalidate_cache(kvm);
        } else {
+               struct kvm_vcpu *vcpu;
+
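+               /* With GITS_TYPER.PTA==0, the target address is a vcpu_id */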
+               vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+               if (!vcpu)
+                       return E_ITS_MAPC_PROCNUM_OOR;
+
                collection = find_collection(its, coll_id);
 
                if (!collection) {
                                                        coll_id);
                        if (ret)
                                return ret;
-                       collection->target_addr = target_addr;
+                       collection->target_addr = vcpu->vcpu_id;
                } else {
-                       collection->target_addr = target_addr;
+                       collection->target_addr = vcpu->vcpu_id;
                        update_affinity_collection(kvm, its, collection);
                }
        }
        if (!its_is_collection_mapped(collection))
                return E_ITS_INVALL_UNMAPPED_COLLECTION;
 
-       vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+       vcpu = collection_to_vcpu(kvm, collection);
        vgic_its_invall(vcpu);
 
        return 0;
 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
 {
-       u32 target1_addr = its_cmd_get_target_addr(its_cmd);
-       u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
        struct kvm_vcpu *vcpu1, *vcpu2;
        struct vgic_irq *irq;
        u32 *intids;
        int irq_count, i;
 
-       if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
-           target2_addr >= atomic_read(&kvm->online_vcpus))
+       /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
+       vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+       vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
+
+       if (!vcpu1 || !vcpu2)
                return E_ITS_MOVALL_PROCNUM_OOR;
 
-       if (target1_addr == target2_addr)
+       if (vcpu1 == vcpu2)
                return 0;
 
-       vcpu1 = kvm_get_vcpu(kvm, target1_addr);
-       vcpu2 = kvm_get_vcpu(kvm, target2_addr);
-
        irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
        if (irq_count < 0)
                return irq_count;
                return PTR_ERR(ite);
 
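+       /* The stored target_addr is a vcpu_id, not a vcpu index */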
        if (its_is_collection_mapped(collection))
-               vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+               vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
 
        irq = vgic_add_lpi(kvm, lpi_id, vcpu);
        if (IS_ERR(irq)) {
        coll_id = val & KVM_ITS_CTE_ICID_MASK;
 
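+       /* A mapped target_addr must name an existing vcpu_id */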
        if (target_addr != COLLECTION_NOT_MAPPED &&
-           target_addr >= atomic_read(&kvm->online_vcpus))
+           !kvm_get_vcpu_by_id(kvm, target_addr))
                return -EINVAL;
 
        collection = find_collection(its, coll_id);