pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
                } else {
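+                       /*
+                        * The vCPU isn't running; enable GA log interrupts in
+                        * the IRTE only if the vCPU is blocking and thus needs
+                        * to be awakened (tracked via the synthetic GALogIntr
+                        * flag in the physical ID entry).
+                        */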
                        pi_data.cpu = -1;
-                       pi_data.ga_log_intr = true;
+                       pi_data.ga_log_intr = entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR;
                }
 
                ret = irq_set_vcpu_affinity(host_irq, &pi_data);
 
        /*
         * No unique action is required to deal with a vCPU that stops/starts
-        * running, as IRTEs are configured to generate GALog interrupts at all
-        * times.
+        * running.  A vCPU that starts running by definition stops blocking as
+        * well, and a vCPU that stops running can't have been blocking, i.e.
+        * doesn't need to toggle GALogIntr.
         */
        AVIC_START_RUNNING      = 0,
        AVIC_STOP_RUNNING       = 0,
+
+       /*
+        * When a vCPU starts blocking, KVM needs to set the GALogIntr flag
+        * in all associated IRTEs so that KVM can wake the vCPU if an IRQ is
+        * sent to the vCPU.
+        */
+       AVIC_START_BLOCKING     = BIT(1),
 };
 
 static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
                                            enum avic_vcpu_action action)
 {
+       bool ga_log_intr = (action & AVIC_START_BLOCKING);
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_kernel_irqfd *irqfd;
 
                void *data = irqfd->irq_bypass_data;
 
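+               /*
+                * If AVIC is simply being (de)scheduled, only the target pCPU
+                * and the GALogIntr flag need to be updated in the IRTE.  If
+                * AVIC itself is being toggled, guest mode must be fully
+                * (de)activated for the IRTE.
+                */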
                if (!(action & AVIC_TOGGLE_ON_OFF))
-                       WARN_ON_ONCE(amd_iommu_update_ga(data, cpu, true));
+                       WARN_ON_ONCE(amd_iommu_update_ga(data, cpu, ga_log_intr));
                else if (cpu >= 0)
-                       WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu, true));
+                       WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu, ga_log_intr));
                else
                        WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
        }
        entry = svm->avic_physical_id_entry;
        WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 
-       entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
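+       /*
+        * Update the entry with the new pCPU and mark the vCPU as running.
+        * Clear the synthetic GALogIntr flag, as a vCPU that starts running
+        * has, by definition, stopped blocking.
+        */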
+       entry &= ~(AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK |
+                  AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR);
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
        entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 
 
        avic_update_iommu_vcpu_affinity(vcpu, -1, action);
 
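+       /*
+        * The synthetic GALogIntr flag should never be set at this point, as
+        * it is set only below, when the vCPU starts blocking, and is cleared
+        * when the vCPU is (re)loaded.
+        */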
+       WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR);
+
+       /*
+        * Keep the previous APIC ID in the entry so that a rogue doorbell from
+        * hardware is at least restricted to a CPU associated with the vCPU.
+        */
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
-       svm->avic_physical_id_entry = entry;
 
        if (enable_ipiv)
                WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
+       /*
+        * Note!  Don't set AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR in the table as
+        * it's a synthetic flag that usurps an unused should-be-zero bit.
+        */
+       if (action & AVIC_START_BLOCKING)
+               entry |= AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR;
+
+       svm->avic_physical_id_entry = entry;
+
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
         */
        u64 entry = to_svm(vcpu)->avic_physical_id_entry;
 
-       /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
-       if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
-               return;
+       /*
+        * Nothing to do if IsRunning == '0' due to vCPU blocking, i.e. if the
+        * vCPU is preempted while it's in the process of blocking.  WARN if the
+        * vCPU wasn't running and isn't blocking, as KVM shouldn't attempt to
+        * put the AVIC if it wasn't previously loaded.
+        */
+       if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)) {
+               if (WARN_ON_ONCE(!kvm_vcpu_is_blocking(vcpu)))
+                       return;
+
+               /*
+                * The vCPU was preempted while blocking; ensure its IRTEs are
+                * configured to generate GA Log Interrupts.  If GALogIntr is
+                * already set, there's nothing to do; if it's unexpectedly
+                * clear, WARN and fall through so that __avic_vcpu_put() sets
+                * it.
+                */
+               if (!(WARN_ON_ONCE(!(entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR))))
+                       return;
+       }
 
-       __avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
+       __avic_vcpu_put(vcpu, kvm_vcpu_is_blocking(vcpu) ? AVIC_START_BLOCKING :
+                                                          AVIC_STOP_RUNNING);
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
         * CPU and cause noisy neighbor problems if the VM is sending interrupts
         * to the vCPU while it's scheduled out.
         */
-       __avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
+       __avic_vcpu_put(vcpu, AVIC_START_BLOCKING);
 }
 
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)