}
 }
 
+static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
+{
+       struct kvmppc_vcore *vcore = target->arch.vcore;
+
+       /*
+        * We expect to have been called by the real-mode handler
+        * (kvmppc_rm_h_confer()), which will have returned H_SUCCESS
+        * directly if the source vcore wasn't idle (i.e. it may still
+        * have useful work to do and should not confer), so we don't
+        * recheck that here.
+        */
+
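+       /*
+        * If the target vcpu is runnable but its vcore is currently
+        * being run, the task to yield to is the vcore runner rather
+        * than the target's own task, since the runner is what is
+        * actually occupying a host cpu on the vcore's behalf.
+        */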
+       spin_lock(&vcore->lock);
+       if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+           vcore->vcore_state != VCORE_INACTIVE)
+               target = vcore->runner;
+       spin_unlock(&vcore->lock);
+
+       return kvm_vcpu_yield_to(target);
+}
+
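+/*
+ * Fetch the yield count that the guest sees in the target vcpu's
+ * VPA (lppaca), if one has been registered.  The lppaca fields are
+ * big-endian.  This is used by H_CONFER below to detect a stale
+ * confer request.
+ */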
+static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
+{
+       int yield_count = 0;
+       struct lppaca *lppaca;
+
+       spin_lock(&vcpu->arch.vpa_update_lock);
+       lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
+       if (lppaca)
+               yield_count = be32_to_cpu(lppaca->yield_count);
+       spin_unlock(&vcpu->arch.vpa_update_lock);
+       return yield_count;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
        unsigned long target, ret = H_SUCCESS;
+       int yield_count;
        struct kvm_vcpu *tvcpu;
        int idx, rc;
 
                        ret = H_PARAMETER;
                        break;
                }
-               kvm_vcpu_yield_to(tvcpu);
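+               /*
+                * The guest passes in r5 the yield count it sampled
+                * from the target's VPA.  If the count has changed,
+                * the target vcpu has run since it was sampled, so
+                * the confer is stale and we return H_SUCCESS
+                * without yielding.
+                */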
+               yield_count = kvmppc_get_gpr(vcpu, 5);
+               if (kvmppc_get_yield_count(tvcpu) != yield_count)
+                       break;
+               kvm_arch_vcpu_yield_to(tvcpu);
                break;
        case H_REGISTER_VPA:
                ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
        vc->vcore_state = VCORE_STARTING;
        vc->in_guest = 0;
        vc->napping_threads = 0;
+       vc->conferring_threads = 0;
 
        /*
         * Updating any of the vpas requires calling kvmppc_pin_guest_page,
 
 #include <linux/memblock.h>
 #include <linux/sizes.h>
 #include <linux/cma.h>
+#include <linux/bitops.h>
 
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
        }
 }
 
+/*
+ * Real-mode H_CONFER implementation.
+ * We check whether we are the only vcpu of this virtual core
+ * that is still running in the guest and has not ceded.  If so,
+ * we pop up to the virtual-mode implementation; if not, we just
+ * return to the guest.
+ */
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+                           unsigned int yield_count)
+{
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       int threads_running;
+       int threads_ceded;
+       int threads_conferring;
+       u64 stop = get_tb() + 10 * tb_ticks_per_usec;
+       int rv = H_SUCCESS; /* => don't yield */
+
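+       /*
+        * Advertise that this thread is conferring, so that the
+        * other threads polling below count it as idle rather
+        * than as doing useful work.
+        */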
+       set_bit(vcpu->arch.ptid, &vc->conferring_threads);
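+       /*
+        * Poll for up to 10us of timebase, or until some thread has
+        * begun exiting the guest: if at any point every thread still
+        * in the guest is either napping (ceded) or conferring, tell
+        * the caller to exit to virtual mode so the whole vcore can
+        * yield the cpu.
+        */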
+       while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
+               threads_running = VCORE_ENTRY_COUNT(vc);
+               threads_ceded = hweight32(vc->napping_threads);
+               threads_conferring = hweight32(vc->conferring_threads);
+               if (threads_ceded + threads_conferring >= threads_running) {
+                       rv = H_TOO_HARD; /* => do yield */
+                       break;
+               }
+       }
+       clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+       return rv;
+}
+
 /*
  * When running HV mode KVM we need to block certain operations while KVM VMs
  * exist in the system. We use a counter of VMs to track this.