genirq: Add max_affinity argument to irq_create_affinity_masks()
author     David Woodhouse <dwmw@amazon.co.uk>
           Tue, 6 Oct 2020 11:06:38 +0000 (12:06 +0100)
committer  David Woodhouse <dwmw@amazon.co.uk>
           Wed, 7 Oct 2020 10:28:14 +0000 (11:28 +0100)
Restrict the generated affinities to the capabilities of the MSI IRQ
domain, where necessary.
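
Callers convert mechanically: passing a NULL mask preserves the old
behaviour, while MSI callers pass the owning domain's limit. A minimal
sketch of the new calling convention (irq_domain::max_affinity is
assumed to be introduced elsewhere in this series):

	struct irq_affinity_desc *masks;

	/* A NULL max_affinity keeps the previous, unrestricted spreading */
	masks = irq_create_affinity_masks(nvec, affd, NULL);

	/* ... or clip the spread to what the MSI domain can target */
	masks = irq_create_affinity_masks(nvec, affd, dom->max_affinity);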

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
drivers/pci/msi.c
include/linux/interrupt.h
kernel/irq/affinity.c

diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d52d118979a6d31ca592e68fd5c54e654ee83024..83847d84eaffab4749d9127e8c5bcf4b902f65e9 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -562,8 +562,15 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
        struct msi_desc *entry;
        u16 control;
 
-       if (affd)
-               masks = irq_create_affinity_masks(nvec, affd);
+       if (affd) {
+               const struct cpumask *max_affinity = NULL;
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+               struct irq_domain *dom = pci_msi_get_device_domain(dev);
+               if (dom)
+                       max_affinity = dom->max_affinity;
+#endif
+               masks = irq_create_affinity_masks(nvec, affd, max_affinity);
+       }
 
        /* MSI Entry Initialization */
        entry = alloc_msi_entry(&dev->dev, nvec, masks);
@@ -701,8 +708,15 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
        int ret, i;
        int vec_count = pci_msix_vec_count(dev);
 
-       if (affd)
-               masks = irq_create_affinity_masks(nvec, affd);
+       if (affd) {
+               const struct cpumask *max_affinity = NULL;
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+               struct irq_domain *dom = pci_msi_get_device_domain(dev);
+               if (dom)
+                       max_affinity = dom->max_affinity;
+#endif
+               masks = irq_create_affinity_masks(nvec, affd, max_affinity);
+       }
 
        for (i = 0, curmsk = masks; i < nvec; i++) {
                entry = alloc_msi_entry(&dev->dev, 1, curmsk);
@@ -1218,13 +1232,19 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
        /* use legacy IRQ if allowed */
        if (flags & PCI_IRQ_LEGACY) {
                if (min_vecs == 1 && dev->irq) {
+                       const struct cpumask *mask = NULL;
+                       struct irq_data *irqd = irq_get_irq_data(dev->irq);
+
+                       if (irqd->domain)
+                               mask = irqd->domain->max_affinity;
+
                        /*
                         * Invoke the affinity spreading logic to ensure that
                         * the device driver can adjust queue configuration
                         * for the single interrupt case.
                         */
                        if (affd)
-                               irq_create_affinity_masks(1, affd);
+                               irq_create_affinity_masks(1, affd, mask);
                        pci_intx(dev, 1);
                        return 1;
                }
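
The same CONFIG_PCI_MSI_IRQ_DOMAIN lookup now appears verbatim in both
msi_setup_entry() and msix_setup_entries(). A hypothetical helper could
factor it out; this is a sketch only, not part of the patch:

	/* Hypothetical helper (not in the patch): resolve the device's
	 * MSI domain affinity limit, or NULL when unrestricted or when
	 * CONFIG_PCI_MSI_IRQ_DOMAIN is disabled. */
	static const struct cpumask *pci_dev_max_affinity(struct pci_dev *dev)
	{
	#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
		struct irq_domain *dom = pci_msi_get_device_domain(dev);

		if (dom)
			return dom->max_affinity;
	#endif
		return NULL;
	}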
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index cd0ff293486aac0cfec636d809c6e5ea970c92bf..4ec6420abb8f9e2abacce55f68ddc585ca04fa45 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -357,7 +357,8 @@ extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
 struct irq_affinity_desc *
-irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd,
+                         const struct cpumask *max_affinity);
 
 unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                                       const struct irq_affinity *affd);
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4d89ad4fae3bb15e5c500a6ccc2f19aa9d63367b..3fba9220fea65255485cf51f17893a752926071a 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -337,6 +337,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
  */
 static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
                                    unsigned int firstvec,
+                                   const struct cpumask *max_affinity,
                                    struct irq_affinity_desc *masks)
 {
        unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
@@ -357,6 +358,10 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
        /* Stabilize the cpumasks */
        get_online_cpus();
        build_node_to_cpumask(node_to_cpumask);
+       if (max_affinity)
+               cpumask_and(npresmsk, cpu_present_mask, max_affinity);
+       else
+               cpumask_copy(npresmsk, cpu_present_mask);
 
        /* Spread on present CPUs starting from affd->pre_vectors */
        ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
@@ -376,7 +381,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
                curvec = firstvec;
        else
                curvec = firstvec + nr_present;
-       cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+       cpumask_andnot(npresmsk, npresmsk, cpu_present_mask);
        ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
                                         node_to_cpumask, npresmsk, nmsk,
                                         masks);
@@ -409,13 +414,16 @@ static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
  * @nvecs:     The total number of vectors
  * @affd:      Description of the affinity requirements
+ * @max_affinity: Optional cpumask limiting the CPUs which can be targeted
  *
  * Returns the irq_affinity_desc pointer or NULL if allocation failed.
  */
 struct irq_affinity_desc *
-irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd,
+                         const struct cpumask *max_affinity)
 {
        unsigned int affvecs, curvec, usedvecs, i;
+       struct cpumask *default_mask = NULL;
        struct irq_affinity_desc *masks = NULL;
 
        /*
@@ -450,9 +458,21 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
        if (!masks)
                return NULL;
 
+       if (!max_affinity)
+               default_mask = irq_default_affinity;
+
        /* Fill out vectors at the beginning that don't need affinity */
-       for (curvec = 0; curvec < affd->pre_vectors; curvec++)
-               cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+       for (curvec = 0; curvec < affd->pre_vectors; curvec++) {
+               if (!default_mask) {
+                       default_mask = &masks[curvec].mask;
+
+                       cpumask_and(default_mask, irq_default_affinity,
+                                   max_affinity);
+                       if (cpumask_empty(default_mask))
+                               cpumask_copy(default_mask, max_affinity);
+               }
+               cpumask_copy(&masks[curvec].mask, default_mask);
+       }
 
        /*
         * Spread on present CPUs starting from affd->pre_vectors. If we
@@ -463,7 +483,8 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
                int ret;
 
                ret = irq_build_affinity_masks(curvec, this_vecs,
-                                              curvec, masks);
+                                              curvec, max_affinity,
+                                              masks);
                if (ret) {
                        kfree(masks);
                        return NULL;
@@ -477,8 +498,17 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
                curvec = affd->pre_vectors + affvecs;
        else
                curvec = affd->pre_vectors + usedvecs;
-       for (; curvec < nvecs; curvec++)
-               cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+       for (; curvec < nvecs; curvec++) {
+               if (!default_mask) {
+                       default_mask = &masks[curvec].mask;
+
+                       cpumask_and(default_mask, irq_default_affinity,
+                                   max_affinity);
+                       if (cpumask_empty(default_mask))
+                               cpumask_copy(default_mask, max_affinity);
+               }
+               cpumask_copy(&masks[curvec].mask, default_mask);
+       }
 
        /* Mark the managed interrupts */
        for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
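
For the pre- and post-vectors the function now computes
irq_default_affinity & max_affinity lazily, reusing the first such
vector's mask as scratch space, and falls back to max_affinity alone
when the intersection is empty, so no vector is left without a valid
target. The rule, expressed as a standalone hypothetical helper for
clarity (the patch open-codes it above):

	/* Sketch of the default-mask rule: prefer irq_default_affinity
	 * clipped to max_affinity, but never produce an empty target set. */
	static void fill_default_mask(struct cpumask *dst,
				      const struct cpumask *max_affinity)
	{
		if (!max_affinity) {
			cpumask_copy(dst, irq_default_affinity);
			return;
		}
		cpumask_and(dst, irq_default_affinity, max_affinity);
		if (cpumask_empty(dst))
			cpumask_copy(dst, max_affinity);
	}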