};
 
 struct kvm_vcpu_stat {
+       u32 halt_successful_poll;       /* # of halts satisfied by polling, without sleeping */
        u32 halt_wakeup;
 };
 
 
 };
 
 struct kvm_vcpu_stat {
+       u32 halt_successful_poll;       /* # of halts satisfied by polling, without sleeping */
        u32 halt_wakeup;
 };
 
 
        u32 resvd_inst_exits;
        u32 break_inst_exits;
        u32 flush_dcache_exits;
+       u32 halt_successful_poll;
        u32 halt_wakeup;
 };
 
 
        { "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
        { "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
        { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+       { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
        { "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
        {NULL}
 };
 
        u32 emulated_inst_exits;
        u32 dec_exits;
        u32 ext_intr_exits;
+       u32 halt_successful_poll;
        u32 halt_wakeup;
        u32 dbell_exits;
        u32 gdbell_exits;
 
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "queue_intr",  VCPU_STAT(queue_intr) },
+       { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "pf_storage",  VCPU_STAT(pf_storage) },
        { "sp_storage",  VCPU_STAT(sp_storage) },
 
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
+       { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell", VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
 
        u32 exit_stop_request;
        u32 exit_validity;
        u32 exit_instruction;
+       u32 halt_successful_poll;
        u32 halt_wakeup;
        u32 instruction_lctl;
        u32 instruction_lctlg;
 
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+       { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
 
        u32 irq_window_exits;
        u32 nmi_window_exits;
        u32 halt_exits;
+       u32 halt_successful_poll;
        u32 halt_wakeup;
        u32 request_irq_exits;
        u32 irq_exits;
 
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
+       { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
 
                  __entry->errno < 0 ? -__entry->errno : __entry->reason)
 );
 
+/*
+ * Tracepoint emitted when a vcpu leaves kvm_vcpu_block(): @ns is the total
+ * time spent in the halt (polling plus any sleep), @waited is true if the
+ * vcpu actually went to sleep rather than having the halt satisfied by
+ * polling alone.
+ */
+TRACE_EVENT(kvm_vcpu_wakeup,
+           TP_PROTO(__u64 ns, bool waited),
+           TP_ARGS(ns, waited),
+
+       TP_STRUCT__entry(
+               __field(        __u64,          ns              )
+               __field(        bool,           waited          )
+       ),
+
+       TP_fast_assign(
+               __entry->ns             = ns;
+               __entry->waited         = waited;
+       ),
+
+       TP_printk("%s time %lld ns",
+                 __entry->waited ? "wait" : "poll",
+                 __entry->ns)
+);
+
 #if defined(CONFIG_HAVE_KVM_IRQFD)
 TRACE_EVENT(kvm_set_irq,
        TP_PROTO(unsigned int gsi, int level, int irq_source_id),
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+/*
+ * How long (in nanoseconds) kvm_vcpu_block() busy-polls for the halt to be
+ * satisfied before going to sleep; 0 disables polling entirely.
+ * Writable at runtime via sysfs (S_IRUGO | S_IWUSR).
+ */
+unsigned int halt_poll_ns = 0;
+module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+
 /*
  * Ordering of locks:
  *
 }
 EXPORT_SYMBOL_GPL(mark_page_dirty);
 
+/*
+ * Decide whether a blocked vcpu must stop blocking.
+ *
+ * Returns 0 if the vcpu should keep blocking, -EINTR if blocking must end
+ * because the vcpu became runnable, a timer interrupt is pending, or the
+ * current task has a signal pending.  Sets KVM_REQ_UNHALT in the runnable
+ * case so the caller knows the halt completed.
+ */
+static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+       if (kvm_arch_vcpu_runnable(vcpu)) {
+               kvm_make_request(KVM_REQ_UNHALT, vcpu);
+               return -EINTR;
+       }
+       if (kvm_cpu_has_pending_timer(vcpu))
+               return -EINTR;
+       if (signal_pending(current))
+               return -EINTR;
+
+       return 0;
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
+       ktime_t start, cur;
        DEFINE_WAIT(wait);
+       bool waited = false;
+
+       start = cur = ktime_get();
+       if (halt_poll_ns) {
+               ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+               do {
+                       /*
+                        * This sets KVM_REQ_UNHALT if an interrupt
+                        * arrives.
+                        */
+                       if (kvm_vcpu_check_block(vcpu) < 0) {
+                               ++vcpu->stat.halt_successful_poll;
+                               goto out;
+                       }
+                       cur = ktime_get();
+               } while (single_task_running() && ktime_before(cur, stop));
+       }
 
        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-               if (kvm_arch_vcpu_runnable(vcpu)) {
-                       kvm_make_request(KVM_REQ_UNHALT, vcpu);
-                       break;
-               }
-               if (kvm_cpu_has_pending_timer(vcpu))
-                       break;
-               if (signal_pending(current))
+               if (kvm_vcpu_check_block(vcpu) < 0)
                        break;
 
+               waited = true;
                schedule();
        }
 
        finish_wait(&vcpu->wq, &wait);
+       cur = ktime_get();
+
+out:
+       trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);