struct msi_desc *entry;
u16 control;
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ if (affd) {
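+ /*
+ * If the device's MSI irq domain restricts which CPUs its vectors
+ * can target, pass that limit on to the affinity spreading code.
+ */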
+ const struct cpumask *max_affinity = NULL;
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+ struct irq_domain *dom = pci_msi_get_device_domain(dev);
+ if (dom)
+ max_affinity = dom->max_affinity;
+#endif
+ masks = irq_create_affinity_masks(nvec, affd, max_affinity);
+ }
/* MSI Entry Initialization */
entry = alloc_msi_entry(&dev->dev, nvec, masks);
int ret, i;
int vec_count = pci_msix_vec_count(dev);
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ if (affd) {
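+ /* The same domain-imposed CPU limit applies to the MSI-X vectors */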
+ const struct cpumask *max_affinity = NULL;
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+ struct irq_domain *dom = pci_msi_get_device_domain(dev);
+ if (dom)
+ max_affinity = dom->max_affinity;
+#endif
+ masks = irq_create_affinity_masks(nvec, affd, max_affinity);
+ }
for (i = 0, curmsk = masks; i < nvec; i++) {
entry = alloc_msi_entry(&dev->dev, 1, curmsk);
/* use legacy IRQ if allowed */
if (flags & PCI_IRQ_LEGACY) {
if (min_vecs == 1 && dev->irq) {
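+ /*
+ * For the single legacy IRQ, pick up any CPU restriction from the
+ * interrupt's domain so the spreading below honours it.
+ */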
+ const struct cpumask *mask = NULL;
+ struct irq_data *irqd = irq_get_irq_data(dev->irq);
+
+ if (irqd && irqd->domain)
+ mask = irqd->domain->max_affinity;
+
/*
* Invoke the affinity spreading logic to ensure that
* the device driver can adjust queue configuration
* for the single interrupt case.
*/
if (affd)
- irq_create_affinity_masks(1, affd);
+ irq_create_affinity_masks(1, affd, mask);
pci_intx(dev, 1);
return 1;
}
*/
static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
unsigned int firstvec,
+ const struct cpumask *max_affinity,
struct irq_affinity_desc *masks)
{
unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
/* Stabilize the cpumasks */
get_online_cpus();
build_node_to_cpumask(node_to_cpumask);
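+ /* Restrict the present CPUs to those we are allowed to target */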
+ if (max_affinity)
+ cpumask_and(npresmsk, cpu_present_mask, max_affinity);
+ else
+ cpumask_copy(npresmsk, cpu_present_mask);
/* Spread on present CPUs starting from affd->pre_vectors */
ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
curvec = firstvec;
else
curvec = firstvec + nr_present;
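+ /*
+ * Spread on the remaining possible-but-not-present CPUs, again
+ * restricted to max_affinity if one was supplied.
+ */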
cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+ if (max_affinity)
+ cpumask_and(npresmsk, npresmsk, max_affinity);
ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
node_to_cpumask, npresmsk, nmsk,
masks);
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
* @nvecs: The total number of vectors
* @affd: Description of the affinity requirements
+ * @max_affinity: Optional cpumask limiting the CPUs which can be targeted,
+ * or NULL for no restriction
*
* Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
struct irq_affinity_desc *
-irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd,
+ const struct cpumask *max_affinity)
{
unsigned int affvecs, curvec, usedvecs, i;
+ struct cpumask *default_mask = NULL;
struct irq_affinity_desc *masks = NULL;
/*
if (!masks)
return NULL;
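+ /*
+ * Without a max_affinity limit the non-managed (pre/post) vectors
+ * simply get irq_default_affinity; otherwise a restricted default
+ * is computed lazily in the loops below.
+ */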
+ if (!max_affinity)
+ default_mask = irq_default_affinity;
+
/* Fill out vectors at the beginning that don't need affinity */
- for (curvec = 0; curvec < affd->pre_vectors; curvec++)
- cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+ for (curvec = 0; curvec < affd->pre_vectors; curvec++) {
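+ /*
+ * Compute irq_default_affinity & max_affinity once, using the
+ * first pre-vector slot as scratch space, and fall back to
+ * max_affinity if the intersection is empty.
+ */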
+ if (!default_mask) {
+ default_mask = &masks[curvec].mask;
+
+ cpumask_and(default_mask, irq_default_affinity,
+ max_affinity);
+ if (cpumask_empty(default_mask))
+ cpumask_copy(default_mask, max_affinity);
+ }
+ cpumask_copy(&masks[curvec].mask, default_mask);
+ }
/*
* Spread on present CPUs starting from affd->pre_vectors. If we
int ret;
ret = irq_build_affinity_masks(curvec, this_vecs,
- curvec, masks);
+ curvec, max_affinity,
+ masks);
if (ret) {
kfree(masks);
return NULL;
curvec = affd->pre_vectors + affvecs;
else
curvec = affd->pre_vectors + usedvecs;
- for (; curvec < nvecs; curvec++)
- cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+ for (; curvec < nvecs; curvec++) {
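+ /* Reuse the (possibly restricted) default mask for the trailing vectors */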
+ if (!default_mask) {
+ default_mask = &masks[curvec].mask;
+
+ cpumask_and(default_mask, irq_default_affinity,
+ max_affinity);
+ if (cpumask_empty(default_mask))
+ cpumask_copy(default_mask, max_affinity);
+ }
+ cpumask_copy(&masks[curvec].mask, default_mask);
+ }
/* Mark the managed interrupts */
for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)