if (vfio_pci_nointx(pdev)) {
pci_info(pdev, "Masking broken INTx support\n");
vdev->nointx = true;
- pci_intx(pdev, 0);
+ pci_intx_unmanaged(pdev, 0);
} else
vdev->pci_2_3 = pci_intx_mask_supported(pdev);
}
*/
if (unlikely(!is_intx(vdev))) {
if (vdev->pci_2_3)
- pci_intx(pdev, 0);
+ pci_intx_unmanaged(pdev, 0);
goto out_unlock;
}
* mask, not just when something is pending.
*/
if (vdev->pci_2_3)
- pci_intx(pdev, 0);
+ pci_intx_unmanaged(pdev, 0);
else
disable_irq_nosync(pdev->irq);
*/
if (unlikely(!is_intx(vdev))) {
if (vdev->pci_2_3)
- pci_intx(pdev, 1);
+ pci_intx_unmanaged(pdev, 1);
goto out_unlock;
}
*/
ctx->masked = vdev->virq_disabled;
if (vdev->pci_2_3) {
- pci_intx(pdev, !ctx->masked);
+ pci_intx_unmanaged(pdev, !ctx->masked);
irqflags = IRQF_SHARED;
} else {
irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
* via their shutdown paths. Restore for NoINTx devices.
*/
if (vdev->nointx)
- pci_intx(pdev, 0);
+ pci_intx_unmanaged(pdev, 0);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
}
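
Note on the replacement above: pci_intx_unmanaged() is the never-managed counterpart of pci_intx(), so for vfio-pci, which tracks and restores INTx state itself, the substitution is mechanical and the enable/disable argument keeps its meaning. As a rough illustration of what either helper amounts to at the config-space level, here is a minimal sketch; the name sketch_pci_intx_unmanaged and the exact structure are illustrative assumptions, not the upstream implementation, and it relies only on PCI_COMMAND and PCI_COMMAND_INTX_DISABLE from <uapi/linux/pci_regs.h>:

#include <linux/pci.h>

/*
 * Illustrative sketch only: toggle the INTx disable bit in the command
 * register without registering any devres cleanup, which is the property
 * the hunks above rely on.
 */
static void sketch_pci_intx_unmanaged(struct pci_dev *pdev, int enable)
{
	u16 cmd, new;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	if (enable)
		new = cmd & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = cmd | PCI_COMMAND_INTX_DISABLE;

	/* Skip the config write when the bit already matches the request. */
	if (new != cmd)
		pci_write_config_word(pdev, PCI_COMMAND, new);
}

If that matches the real helper, each converted call site above (masking with 0, unmasking with 1, or following !ctx->masked) behaves exactly as the old pci_intx() call did, minus any managed-device bookkeeping.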