 #define SPLIT_HACK_MASK                        0xff000000
 #define SPLIT_HACK_OFFS                        0xfb000000
 
+/*
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
+ * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
+ * (but not its actual threading mode, which is not available) to avoid
+ * collisions.
+ *
+ * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
+ * 0) unchanged: if the guest is filling each VCORE completely then it will be
+ * using consecutive IDs and it will fill the space without any packing.
+ *
+ * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
+ * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
+ * added to avoid collisions.
+ *
+ * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
+ * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
+ * can be safely packed into the second half of each VCORE by adding an offset
+ * of (stride / 2).
+ *
+ * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
+ * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
+ * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
+ *
+ * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
+ * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
+ * must be free to use.
+ *
+ * (The offsets for each block are stored in block_offsets[], indexed by the
+ * block number if the stride is 8. For cases where the guest's stride is less
+ * than 8, we can re-use the block_offsets array by multiplying the block
+ * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
+ */
+static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
+{
+       static const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
+       int stride = kvm->arch.emul_smt_mode;
+       int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
+       u32 packed_id;
+
+       if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
+               return 0;
+       packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
+       if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
+               return 0;
+       return packed_id;
+}
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
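
To make the packing scheme concrete, here is a small userspace model of the
arithmetic above. It is a sketch only: KVM_MAX_VCPUS and MAX_SMT_THREADS are
given small illustrative values rather than the kernel's real ones, and
pack_vcpu_id() simply mirrors kvmppc_pack_vcpu_id() from the hunk above.

#include <stdio.h>

/* Illustrative stand-ins only; the kernel's real limits are much larger. */
#define KVM_MAX_VCPUS	16
#define MAX_SMT_THREADS	8

/* Userspace mirror of kvmppc_pack_vcpu_id() (no WARN_ONCE bounds checks). */
static unsigned int pack_vcpu_id(unsigned int stride, unsigned int id)
{
	static const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	unsigned int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);

	return (id % KVM_MAX_VCPUS) + block_offsets[block];
}

int main(void)
{
	/* A stride-8 guest running 1 thread/core uses IDs 0, 8, 16, ..., 120. */
	for (unsigned int id = 0; id < KVM_MAX_VCPUS * 8; id += 8)
		printf("id %3u -> packed %2u\n", id, pack_vcpu_id(8, id));
	return 0;
}

All sixteen guest IDs map to sixteen distinct packed IDs covering [0..16): the
block offsets exactly fill the holes that the guest's sparse numbering leaves
in each virtual core.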
 
        return threads_per_subcore;
 }
 
-static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
 {
        struct kvmppc_vcore *vcore;
 
        init_swait_queue_head(&vcore->wq);
        vcore->preempt_tb = TB_NIL;
        vcore->lpcr = kvm->arch.lpcr;
-       vcore->first_vcpuid = core * kvm->arch.smt_mode;
+       vcore->first_vcpuid = id;
        vcore->kvm = kvm;
        INIT_LIST_HEAD(&vcore->preempt_list);
 
                                                   unsigned int id)
 {
        struct kvm_vcpu *vcpu;
-       int err;
+       int err = -EINVAL;
        int core;
        struct kvmppc_vcore *vcore;
 
+       if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode) &&
+           cpu_has_feature(CPU_FTR_ARCH_300)) {
+               pr_devel("DNCI: VCPU ID too high\n");
+               goto out;
+       }
+
        err = -ENOMEM;
        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
        mutex_lock(&kvm->lock);
        vcore = NULL;
        err = -EINVAL;
-       core = id / kvm->arch.smt_mode;
+       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+               BUG_ON(kvm->arch.smt_mode != 1);
+               core = kvmppc_pack_vcpu_id(kvm, id);
+       } else {
+               core = id / kvm->arch.smt_mode;
+       }
        if (core < KVM_MAX_VCORES) {
                vcore = kvm->arch.vcores[core];
-               if (!vcore) {
+               if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
+                       pr_devel("KVM: collision on id %u", id);
+                       vcore = NULL;
+               } else if (!vcore) {
                        err = -ENOMEM;
-                       vcore = kvmppc_vcore_create(kvm, core);
+                       vcore = kvmppc_vcore_create(kvm,
+                                       id & ~(kvm->arch.smt_mode - 1));
                        kvm->arch.vcores[core] = vcore;
                        kvm->arch.online_vcores++;
                }
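
The expression id & ~(kvm->arch.smt_mode - 1) passed to kvmppc_vcore_create()
rounds the VCPU ID down to the first ID of its virtual core. Since smt_mode is
always a power of two, this is exactly the old core * kvm->arch.smt_mode on
the pre-POWER9 path, and a no-op on POWER9 where smt_mode is forced to 1. A
minimal standalone check of that identity (ranges chosen arbitrarily):

#include <assert.h>

int main(void)
{
	/* smt_mode is always a power of two (1, 2, 4 or 8). */
	for (unsigned int smt_mode = 1; smt_mode <= 8; smt_mode *= 2)
		for (unsigned int id = 0; id < 64; id++)
			assert((id & ~(smt_mode - 1)) ==
			       (id / smt_mode) * smt_mode);
	return 0;
}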
 
        return -EBUSY;
 }
 
+/* Map a guest server number (i.e. the VCPU ID) to a VP index in our VP block */
+static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+       return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
                             struct kvmppc_xive_src_block *sb,
                             struct kvmppc_xive_irq_state *state)
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
-                                         xive->vp_base + state->act_server,
+                                         xive_vp(xive, state->act_server),
                                          MASKED, state->number);
                /* set old_p so we can track if an H_EOI was done */
                state->old_p = true;
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
-                                         xive->vp_base + state->act_server,
+                                         xive_vp(xive, state->act_server),
                                          state->act_priority, state->number);
                /* If an EOI is needed, do it here */
                if (!state->old_p)
        kvmppc_xive_select_irq(state, &hw_num, NULL);
 
        return xive_native_configure_irq(hw_num,
-                                        xive->vp_base + server,
+                                        xive_vp(xive, server),
                                         prio, state->number);
 }
 
         * which is fine for a never started interrupt.
         */
        xive_native_configure_irq(hw_irq,
-                                 xive->vp_base + state->act_server,
+                                 xive_vp(xive, state->act_server),
                                  state->act_priority, state->number);
 
        /*
 
        /* Reconfigure the IPI */
        xive_native_configure_irq(state->ipi_number,
-                                 xive->vp_base + state->act_server,
+                                 xive_vp(xive, state->act_server),
                                  state->act_priority, state->number);
 
        /*
                pr_devel("Duplicate !\n");
                return -EEXIST;
        }
-       if (cpu >= KVM_MAX_VCPUS) {
+       if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
-       xc->vp_id = xive->vp_base + cpu;
+       xc->vp_id = xive_vp(xive, cpu);
        xc->mfrr = 0xff;
        xc->valid = true;
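
One property of the xive_vp() conversion is worth spelling out: the VP block
behind vp_base is sized for KVM_MAX_VCPUS virtual processors, while server
numbers (VCPU IDs) may now range up to KVM_MAX_VCPUS * emul_smt_mode. Because
kvmppc_pack_vcpu_id() always returns a value below KVM_MAX_VCPUS, the packed
lookup stays inside the allocated block, where the old vp_base + server would
have indexed past its end. A sketch using the same illustrative constants as
the earlier model (VP_BASE is a hypothetical base, not a real OPAL value):

#include <assert.h>
#include <stdio.h>

#define KVM_MAX_VCPUS	16	/* illustrative stand-in */
#define MAX_SMT_THREADS	8
#define VP_BASE		0x80	/* hypothetical VP block base */

static unsigned int pack_vcpu_id(unsigned int stride, unsigned int id)
{
	static const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	unsigned int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);

	return (id % KVM_MAX_VCPUS) + block_offsets[block];
}

int main(void)
{
	const unsigned int stride = 8;	/* 1 thread/core guest */

	for (unsigned int server = 0; server < KVM_MAX_VCPUS * stride;
	     server += stride) {
		unsigned int vp = VP_BASE + pack_vcpu_id(stride, server);

		/* The old VP_BASE + server would overflow once server >= 16. */
		assert(vp < VP_BASE + KVM_MAX_VCPUS);
		printf("server %3u -> vp 0x%02x\n", server, vp);
	}
	return 0;
}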