{
        if (!kvm_is_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
+
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
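
The hunk above is in kvm_set_pfn_dirty(): the patch adds the blank line after the declaration of page, per kernel coding style. The nested PageReserved() check matters because only a real, non-reserved struct page carries dirty state. For comparison, the sibling accessed-bit helper in the same file uses the reserved-pfn guard alone (paraphrased; the exact body varies across kernel versions):

	#include <linux/kvm_host.h>	/* kvm_pfn_t, kvm_is_reserved_pfn() */
	#include <linux/swap.h>		/* mark_page_accessed() */

	/* Paraphrased sibling helper from the same file, for comparison. */
	void kvm_set_pfn_accessed(kvm_pfn_t pfn)
	{
		if (!kvm_is_reserved_pfn(pfn))
			mark_page_accessed(pfn_to_page(pfn));
	}
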
        start = cur = ktime_get();
        if (halt_poll_ns) {
                ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+
                do {
                        /*
                         * This sets KVM_REQ_UNHALT if an interrupt
                         * arrives.
                         */
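
This hunk is kvm_vcpu_block()'s halt-polling fast path: spin for up to halt_poll_ns before actually scheduling the vCPU thread out, so a promptly arriving interrupt avoids a full sleep/wake cycle. A minimal sketch of the bounded-poll idiom, with poll_bounded() and the done() callback being hypothetical stand-ins for the KVM-specific pieces:

	#include <linux/ktime.h>
	#include <linux/sched.h>	/* single_task_running(); header location varies by version */

	/* Hypothetical helper: poll done() until it fires or deadline_ns expires. */
	static bool poll_bounded(u64 deadline_ns, bool (*done)(void *), void *arg)
	{
		ktime_t stop = ktime_add_ns(ktime_get(), deadline_ns);

		do {
			if (done(arg))
				return true;	/* event arrived while polling */
			cpu_relax();
		} while (single_task_running() && ktime_before(ktime_get(), stop));

		return false;			/* give up and block for real */
	}

Polling is also abandoned early when single_task_running() goes false, so a busy runqueue is never held hostage by a spinning vCPU.
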
                        /* The thread running this VCPU changed. */
                        struct pid *oldpid = vcpu->pid;
                        struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+
                        rcu_assign_pointer(vcpu->pid, newpid);
                        if (oldpid)
                                synchronize_rcu();
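
Here kvm_vcpu_ioctl() republishes vcpu->pid after the vCPU has migrated to a different userspace thread. The shape is the classic RCU publish-then-retire pattern: readers only ever observe the old or the new pid, and synchronize_rcu() guarantees no reader still holds the old pointer before it is released (the put_pid() of oldpid follows in the context elided above). A sketch of the same pattern, with update_vcpu_pid() being a hypothetical wrapper name:

	#include <linux/kvm_host.h>
	#include <linux/pid.h>
	#include <linux/rcupdate.h>

	/* Hypothetical wrapper around the publish/retire steps shown in the hunk. */
	static void update_vcpu_pid(struct kvm_vcpu *vcpu)
	{
		struct pid *oldpid = vcpu->pid;
		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);

		rcu_assign_pointer(vcpu->pid, newpid);	/* publish: readers now see newpid */
		if (oldpid)
			synchronize_rcu();		/* wait out readers of oldpid */
		put_pid(oldpid);			/* put_pid(NULL) is a no-op */
	}
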
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        case KVM_REGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
+
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof(zone)))
                        goto out;
                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                break;
        }
        case KVM_UNREGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
+
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof(zone)))
                        goto out;
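
Both coalesced-MMIO cases in kvm_vm_ioctl() follow the standard ioctl copy-in shape: copy the argument onto the stack with copy_from_user(), fail with -EFAULT before touching the payload, and only ever operate on the kernel-side copy. In generic form (my_zone and my_ioctl_handler are illustrative names, not KVM's):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct my_zone {			/* illustrative payload layout */
		__u64 addr;
		__u32 size;
		__u32 pad;
	};

	static long my_ioctl_handler(void __user *argp)
	{
		struct my_zone zone;

		if (copy_from_user(&zone, argp, sizeof(zone)))
			return -EFAULT;		/* never act on user memory directly */
		/* ... operate on the private copy of 'zone' ... */
		return 0;
	}
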
 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+
        if (vcpu->preempted)
                vcpu->preempted = false;
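
kvm_sched_in() is one half of KVM's preempt-notifier pair; the tail elided above goes on to call kvm_arch_vcpu_load(vcpu, cpu). Roughly how the notifier gets wired up, paraphrased from kvm_main.c of the same era (struct preempt_ops and preempt_notifier_register() are the real preempt-notifier API; kvm_sched_out is the matching callback not shown in this excerpt):

	#include <linux/kvm_host.h>
	#include <linux/preempt.h>

	static struct preempt_ops kvm_preempt_ops = {
		.sched_in  = kvm_sched_in,	/* vCPU thread scheduled back in */
		.sched_out = kvm_sched_out,	/* vCPU thread being preempted */
	};

	/* Paraphrased: each vCPU registers its notifier while it is loaded. */
	int vcpu_load(struct kvm_vcpu *vcpu)
	{
		int cpu;

		if (mutex_lock_killable(&vcpu->mutex))
			return -EINTR;
		cpu = get_cpu();		/* disable preemption */
		preempt_notifier_register(&vcpu->preempt_notifier);
		kvm_arch_vcpu_load(vcpu, cpu);
		put_cpu();
		return 0;
	}
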