author     Jakub Kicinski <kuba@kernel.org>   2025-06-17 14:43:58 -0700
committer  Jakub Kicinski <kuba@kernel.org>   2025-06-17 14:44:41 -0700
commit     0f0decc777d7ed4d7ba316a5f92495e38c6fc6b1 (patch)
tree       aa60ef15b2398b4796b652e54d54846f554ba82e
parent     170e4e3944aa39accf64d869b27c187f8c08abc7 (diff)
parent     755391121038c06cb653241aa94dcabd87179f62 (diff)
Merge branch 'shradha_v6.16-rc1' of https://github.com/shradhagupta6/linux
Shradha Gupta says:
====================
Allow dynamic MSI-X vector allocation for MANA
This patchset enables the MANA driver to allocate PCI MSI-X vectors
dynamically.
The first patch exports pci_msix_prepare_desc() from the PCI/MSI core so
that descriptors for dynamically added MSI-X vectors can be prepared
correctly.
The second patch adds support for dynamic vector allocation to the
pci-hyperv PCI controller by enabling the MSI_FLAG_PCI_MSIX_ALLOC_DYN
flag and using the pci_msix_prepare_desc() helper exported in the first
patch.
The third patch adds a detailed description of irq_setup() to make the
function's design easier to understand.
The fourth patch prepares the MANA driver for dynamic IRQ allocation: it
changes irq_setup() to allow skipping the first sibling CPU set when
certain IRQs are already affinitized to it.
The fifth patch changes the MANA driver to allocate MSI-X vectors
dynamically; if dynamic allocation is not supported, it falls back to the
older behavior. (A condensed sketch of this flow follows the commit
message below.)
* 'shradha_v6.16-rc1' of https://github.com/shradhagupta6/linux:
net: mana: Allocate MSI-X vectors dynamically
net: mana: Allow irq_setup() to skip cpus for affinity
net: mana: explain irq_setup() algorithm
PCI: hv: Allow dynamic MSI-X vector allocation
PCI/MSI: Export pci_msix_prepare_desc() for dynamic MSI-X allocations
====================
Link: https://patch.msgid.link/1749650984-9193-1-git-send-email-shradhagupta@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
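
Editor's note: for readers who want the moving parts in one place, below is a
minimal, hypothetical sketch of the allocation pattern the series describes:
probe one MSI-X vector up front for the hardware channel, then grow the vector
space per queue when the platform advertises dynamic allocation, otherwise
allocate everything in one shot. The demo_* names are invented for
illustration; pci_msix_can_alloc_dyn(), pci_alloc_irq_vectors(),
pci_msix_alloc_irq_at(), pci_irq_vector() and request_irq() are the existing
kernel APIs the patches build on, and error unwinding is abbreviated.

/* Hypothetical illustration only -- not the MANA code from this series. */
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/pci.h>

static irqreturn_t demo_intr(int irq, void *data)
{
	/* a real handler would poll an event queue here */
	return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev, unsigned int nr_queues)
{
	bool dyn = pci_msix_can_alloc_dyn(pdev);
	unsigned int i;
	int nvec, err;

	/* With dynamic allocation only the control vector is needed now;
	 * otherwise all vectors must be allocated in one shot.
	 */
	nvec = pci_alloc_irq_vectors(pdev, 1, dyn ? 1 : nr_queues + 1,
				     PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	err = request_irq(pci_irq_vector(pdev, 0), demo_intr, 0,
			  "demo_hwc", pdev);
	if (err)
		goto free_vectors;

	if (!dyn)
		return 0;	/* queue vectors were allocated up front;
				 * requesting them is omitted in this sketch
				 */

	/* Grow the MSI-X space one vector at a time once the control
	 * channel is up, mirroring the shape of the MANA changes.
	 */
	for (i = 1; i <= nr_queues; i++) {
		struct msi_map map = pci_msix_alloc_irq_at(pdev, i, NULL);

		if (!map.virq) {
			err = map.index;
			goto free_hwc;
		}

		err = request_irq(map.virq, demo_intr, 0, "demo_q", pdev);
		if (err)
			goto free_hwc;
	}
	return 0;

free_hwc:
	/* queue IRQs requested in earlier loop iterations are not unwound
	 * here; a real driver must free them and their vectors as well.
	 */
	free_irq(pci_irq_vector(pdev, 0), pdev);
free_vectors:
	pci_free_irq_vectors(pdev);
	return err;
}

The actual implementation in the diff below additionally tracks per-IRQ
contexts in an xarray and spreads affinities in a NUMA- and sibling-aware way
via irq_setup().
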
-rw-r--r--   drivers/net/ethernet/microsoft/mana/gdma_main.c | 366
-rw-r--r--   drivers/pci/controller/pci-hyperv.c             |   3
-rw-r--r--   drivers/pci/msi/irqdomain.c                     |   5
-rw-r--r--   include/linux/msi.h                             |   2
-rw-r--r--   include/net/mana/gdma.h                         |   6
5 files changed, 293 insertions(+), 89 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 069b7a871b78..ac2f39853bf4 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,6 +6,8 @@
 #include <linux/pci.h>
 #include <linux/utsname.h>
 #include <linux/version.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
 
 #include <net/mana/mana.h>
 
@@ -80,8 +82,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
 		return err ? err : -EPROTO;
 	}
 
-	if (gc->num_msix_usable > resp.max_msix)
-		gc->num_msix_usable = resp.max_msix;
+	if (!pci_msix_can_alloc_dyn(pdev)) {
+		if (gc->num_msix_usable > resp.max_msix)
+			gc->num_msix_usable = resp.max_msix;
+	} else {
+		/* If dynamic allocation is enabled we have already allocated
+		 * hwc msi
+		 */
+		gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
+	}
 
 	if (gc->num_msix_usable <= 1)
 		return -ENOSPC;
@@ -558,7 +567,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
 	}
 
 	queue->eq.msix_index = msi_index;
-	gic = &gc->irq_contexts[msi_index];
+	gic = xa_load(&gc->irq_contexts, msi_index);
+	if (WARN_ON(!gic))
+		return -EINVAL;
 
 	spin_lock_irqsave(&gic->lock, flags);
 	list_add_rcu(&queue->entry, &gic->eq_list);
@@ -583,7 +594,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
 	if (WARN_ON(msix_index >= gc->num_msix_usable))
 		return;
 
-	gic = &gc->irq_contexts[msix_index];
+	gic = xa_load(&gc->irq_contexts, msix_index);
+	if (WARN_ON(!gic))
+		return;
+
 	spin_lock_irqsave(&gic->lock, flags);
 	list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
 		if (queue == eq) {
@@ -1363,7 +1377,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
 	r->size = 0;
 }
 
-static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+/*
+ * Spread on CPUs with the following heuristics:
+ *
+ * 1. No more than one IRQ per CPU, if possible;
+ * 2. NUMA locality is the second priority;
+ * 3. Sibling dislocality is the last priority.
+ *
+ * Let's consider this topology:
+ *
+ * Node            0               1
+ * Core        0       1       2       3
+ * CPU       0   1   2   3   4   5   6   7
+ *
+ * The most performant IRQ distribution based on the above topology
+ * and heuristics may look like this:
+ *
+ * IRQ     Nodes   Cores   CPUs
+ * 0       1       0       0-1
+ * 1       1       1       2-3
+ * 2       1       0       0-1
+ * 3       1       1       2-3
+ * 4       2       2       4-5
+ * 5       2       3       6-7
+ * 6       2       2       4-5
+ * 7       2       3       6-7
+ *
+ * The heuristics is implemented as follows.
+ *
+ * The outer for_each() loop resets the 'weight' to the actual number
+ * of CPUs in the hop. Then inner for_each() loop decrements it by the
+ * number of sibling groups (cores) while assigning first set of IRQs
+ * to each group. IRQs 0 and 1 above are distributed this way.
+ *
+ * Now, because NUMA locality is more important, we should walk the
+ * same set of siblings and assign 2nd set of IRQs (2 and 3), and it's
+ * implemented by the medium while() loop. We do like this unless the
+ * number of IRQs assigned on this hop will not become equal to number
+ * of CPUs in the hop (weight == 0). Then we switch to the next hop and
+ * do the same thing.
+ */
+
+static int irq_setup(unsigned int *irqs, unsigned int len, int node,
+		     bool skip_first_cpu)
 {
 	const struct cpumask *next, *prev = cpu_none_mask;
 	cpumask_var_t cpus __free(free_cpumask_var);
@@ -1378,11 +1434,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
 	while (weight > 0) {
 		cpumask_andnot(cpus, next, prev);
 		for_each_cpu(cpu, cpus) {
+			cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+			--weight;
+
+			if (unlikely(skip_first_cpu)) {
+				skip_first_cpu = false;
+				continue;
+			}
+
 			if (len-- == 0)
 				goto done;
+
 			irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
-			cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
-			--weight;
 		}
 	}
 	prev = next;
@@ -1392,47 +1455,108 @@ done:
 	return 0;
 }
 
-static int mana_gd_setup_irqs(struct pci_dev *pdev)
+static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
-	unsigned int max_queues_per_port;
 	struct gdma_irq_context *gic;
-	unsigned int max_irqs, cpu;
-	int start_irq_index = 1;
-	int nvec, *irqs, irq;
-	int err, i = 0, j;
+	bool skip_first_cpu = false;
+	int *irqs, irq, err, i;
 
-	cpus_read_lock();
-	max_queues_per_port = num_online_cpus();
-	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
-		max_queues_per_port = MANA_MAX_NUM_QUEUES;
+	irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	/*
+	 * While processing the next pci irq vector, we start with index 1,
+	 * as IRQ vector at index 0 is already processed for HWC.
+	 * However, the population of irqs array starts with index 0, to be
+	 * further used in irq_setup()
+	 */
+	for (i = 1; i <= nvec; i++) {
+		gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+		if (!gic) {
+			err = -ENOMEM;
+			goto free_irq;
+		}
+		gic->handler = mana_gd_process_eq_events;
+		INIT_LIST_HEAD(&gic->eq_list);
+		spin_lock_init(&gic->lock);
 
-	/* Need 1 interrupt for the Hardware communication Channel (HWC) */
-	max_irqs = max_queues_per_port + 1;
+		snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+			 i - 1, pci_name(pdev));
 
-	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
-	if (nvec < 0) {
-		cpus_read_unlock();
-		return nvec;
+		/* one pci vector is already allocated for HWC */
+		irqs[i - 1] = pci_irq_vector(pdev, i);
+		if (irqs[i - 1] < 0) {
+			err = irqs[i - 1];
+			goto free_current_gic;
+		}
+
+		err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
+		if (err)
+			goto free_current_gic;
+
+		xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
 	}
 
-	if (nvec <= num_online_cpus())
-		start_irq_index = 0;
-
-	irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
-	if (!irqs) {
-		err = -ENOMEM;
-		goto free_irq_vector;
+	/*
+	 * When calling irq_setup() for dynamically added IRQs, if number of
+	 * CPUs is more than or equal to allocated MSI-X, we need to skip the
+	 * first CPU sibling group since they are already affinitized to HWC IRQ
+	 */
+	cpus_read_lock();
+	if (gc->num_msix_usable <= num_online_cpus())
+		skip_first_cpu = true;
+
+	err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
+	if (err) {
+		cpus_read_unlock();
+		goto free_irq;
 	}
 
-	gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
-				   GFP_KERNEL);
-	if (!gc->irq_contexts) {
-		err = -ENOMEM;
-		goto free_irq_array;
+	cpus_read_unlock();
+	kfree(irqs);
+	return 0;
+
+free_current_gic:
+	kfree(gic);
+free_irq:
+	for (i -= 1; i > 0; i--) {
+		irq = pci_irq_vector(pdev, i);
+		gic = xa_load(&gc->irq_contexts, i);
+		if (WARN_ON(!gic))
+			continue;
+
+		irq_update_affinity_hint(irq, NULL);
+		free_irq(irq, gic);
+		xa_erase(&gc->irq_contexts, i);
+		kfree(gic);
 	}
+	kfree(irqs);
+	return err;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	struct gdma_irq_context *gic;
+	int *irqs, *start_irqs, irq;
+	unsigned int cpu;
+	int err, i;
+
+	irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	start_irqs = irqs;
 
 	for (i = 0; i < nvec; i++) {
-		gic = &gc->irq_contexts[i];
+		gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+		if (!gic) {
+			err = -ENOMEM;
+			goto free_irq;
+		}
+
 		gic->handler = mana_gd_process_eq_events;
 		INIT_LIST_HEAD(&gic->eq_list);
 		spin_lock_init(&gic->lock);
@@ -1444,69 +1568,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
 			 i - 1, pci_name(pdev));
 
-		irq = pci_irq_vector(pdev, i);
-		if (irq < 0) {
-			err = irq;
-			goto free_irq;
+		irqs[i] = pci_irq_vector(pdev, i);
+		if (irqs[i] < 0) {
+			err = irqs[i];
+			goto free_current_gic;
 		}
 
-		if (!i) {
-			err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
-			if (err)
-				goto free_irq;
-
-			/* If number of IRQ is one extra than number of online CPUs,
-			 * then we need to assign IRQ0 (hwc irq) and IRQ1 to
-			 * same CPU.
-			 * Else we will use different CPUs for IRQ0 and IRQ1.
-			 * Also we are using cpumask_local_spread instead of
-			 * cpumask_first for the node, because the node can be
-			 * mem only.
-			 */
-			if (start_irq_index) {
-				cpu = cpumask_local_spread(i, gc->numa_node);
-				irq_set_affinity_and_hint(irq, cpumask_of(cpu));
-			} else {
-				irqs[start_irq_index] = irq;
-			}
-		} else {
-			irqs[i - start_irq_index] = irq;
-			err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
-					  gic->name, gic);
-			if (err)
-				goto free_irq;
-		}
+		err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
+		if (err)
+			goto free_current_gic;
+
+		xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
 	}
 
-	err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
-	if (err)
+	/* If number of IRQ is one extra than number of online CPUs,
+	 * then we need to assign IRQ0 (hwc irq) and IRQ1 to
+	 * same CPU.
+	 * Else we will use different CPUs for IRQ0 and IRQ1.
+	 * Also we are using cpumask_local_spread instead of
+	 * cpumask_first for the node, because the node can be
	 * mem only.
+	 */
+	cpus_read_lock();
+	if (nvec > num_online_cpus()) {
+		cpu = cpumask_local_spread(0, gc->numa_node);
+		irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
+		irqs++;
+		nvec -= 1;
+	}
+
+	err = irq_setup(irqs, nvec, gc->numa_node, false);
+	if (err) {
+		cpus_read_unlock();
 		goto free_irq;
+	}
 
-	gc->max_num_msix = nvec;
-	gc->num_msix_usable = nvec;
 	cpus_read_unlock();
-	kfree(irqs);
+	kfree(start_irqs);
 	return 0;
 
+free_current_gic:
+	kfree(gic);
 free_irq:
-	for (j = i - 1; j >= 0; j--) {
-		irq = pci_irq_vector(pdev, j);
-		gic = &gc->irq_contexts[j];
+	for (i -= 1; i >= 0; i--) {
+		irq = pci_irq_vector(pdev, i);
+		gic = xa_load(&gc->irq_contexts, i);
+		if (WARN_ON(!gic))
+			continue;
 
 		irq_update_affinity_hint(irq, NULL);
 		free_irq(irq, gic);
+		xa_erase(&gc->irq_contexts, i);
+		kfree(gic);
 	}
 
-	kfree(gc->irq_contexts);
-	gc->irq_contexts = NULL;
-free_irq_array:
-	kfree(irqs);
-free_irq_vector:
-	cpus_read_unlock();
-	pci_free_irq_vectors(pdev);
+	kfree(start_irqs);
 	return err;
 }
 
+static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	unsigned int max_irqs, min_irqs;
+	int nvec, err;
+
+	if (pci_msix_can_alloc_dyn(pdev)) {
+		max_irqs = 1;
+		min_irqs = 1;
+	} else {
+		/* Need 1 interrupt for HWC */
+		max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
+		min_irqs = 2;
+	}
+
+	nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
+	if (nvec < 0)
+		return nvec;
+
+	err = mana_gd_setup_irqs(pdev, nvec);
+	if (err) {
+		pci_free_irq_vectors(pdev);
+		return err;
+	}
+
+	gc->num_msix_usable = nvec;
+	gc->max_num_msix = nvec;
+
+	return 0;
+}
+
+static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	struct msi_map irq_map;
+	int max_irqs, i, err;
+
+	if (!pci_msix_can_alloc_dyn(pdev))
+		/* remain irqs are already allocated with HWC IRQ */
+		return 0;
+
+	/* allocate only remaining IRQs*/
+	max_irqs = gc->num_msix_usable - 1;
+
+	for (i = 1; i <= max_irqs; i++) {
+		irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
+		if (!irq_map.virq) {
+			err = irq_map.index;
+			/* caller will handle cleaning up all allocated
+			 * irqs, after HWC is destroyed
+			 */
+			return err;
+		}
+	}
+
+	err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
+	if (err)
+		return err;
+
+	gc->max_num_msix = gc->max_num_msix + max_irqs;
+
+	return 0;
+}
+
 static void mana_gd_remove_irqs(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1521,19 +1704,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
 		if (irq < 0)
 			continue;
 
-		gic = &gc->irq_contexts[i];
+		gic = xa_load(&gc->irq_contexts, i);
+		if (WARN_ON(!gic))
+			continue;
 
 		/* Need to clear the hint before free_irq */
 		irq_update_affinity_hint(irq, NULL);
 		free_irq(irq, gic);
+		xa_erase(&gc->irq_contexts, i);
+		kfree(gic);
 	}
 
 	pci_free_irq_vectors(pdev);
 
 	gc->max_num_msix = 0;
 	gc->num_msix_usable = 0;
-	kfree(gc->irq_contexts);
-	gc->irq_contexts = NULL;
 }
 
 static int mana_gd_setup(struct pci_dev *pdev)
@@ -1548,9 +1733,10 @@
 	if (!gc->service_wq)
 		return -ENOMEM;
 
-	err = mana_gd_setup_irqs(pdev);
+	err = mana_gd_setup_hwc_irqs(pdev);
 	if (err) {
-		dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
+		dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
+			err);
 		goto free_workqueue;
 	}
 
@@ -1566,6 +1752,12 @@
 	if (err)
 		goto destroy_hwc;
 
+	err = mana_gd_setup_remaining_irqs(pdev);
+	if (err) {
+		dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err);
+		goto destroy_hwc;
+	}
+
 	err = mana_gd_detect_devices(pdev);
 	if (err)
 		goto destroy_hwc;
@@ -1646,6 +1838,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	gc->is_pf = mana_is_pf(pdev->device);
 	gc->bar0_va = bar0_va;
 	gc->dev = &pdev->dev;
+	xa_init(&gc->irq_contexts);
 
 	if (gc->is_pf)
 		gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
@@ -1680,6 +1873,7 @@ unmap_bar:
 	 */
 	debugfs_remove_recursive(gc->mana_pci_debugfs);
 	gc->mana_pci_debugfs = NULL;
+	xa_destroy(&gc->irq_contexts);
 	pci_iounmap(pdev, bar0_va);
 free_gc:
 	pci_set_drvdata(pdev, NULL);
@@ -1705,6 +1899,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
 	gc->mana_pci_debugfs = NULL;
 
+	xa_destroy(&gc->irq_contexts);
+
 	pci_iounmap(pdev, gc->bar0_va);
 
 	vfree(gc);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ef5d655a0052..86ca041bf74a 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2119,6 +2119,7 @@ static struct irq_chip hv_msi_irq_chip = {
 static struct msi_domain_ops hv_msi_ops = {
 	.msi_prepare	= hv_msi_prepare,
 	.msi_free	= hv_msi_free,
+	.prepare_desc	= pci_msix_prepare_desc,
 };
 
 /**
@@ -2140,7 +2141,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
 	hbus->msi_info.ops = &hv_msi_ops;
 	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
 		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
-		MSI_FLAG_PCI_MSIX);
+		MSI_FLAG_PCI_MSIX | MSI_FLAG_PCI_MSIX_ALLOC_DYN);
 	hbus->msi_info.handler = FLOW_HANDLER;
 	hbus->msi_info.handler_name = FLOW_NAME;
 	hbus->msi_info.data = hbus;
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c05152733993..765312c92d9b 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -222,13 +222,14 @@ static void pci_irq_unmask_msix(struct irq_data *data)
 	pci_msix_unmask(irq_data_get_msi_desc(data));
 }
 
-static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
-				  struct msi_desc *desc)
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+			   struct msi_desc *desc)
 {
 	/* Don't fiddle with preallocated MSI descriptors */
 	if (!desc->pci.mask_base)
 		msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
 }
+EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
 
 static const struct msi_domain_template pci_msix_template = {
 	.chip = {
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6863540f4b71..7f254bde5426 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -706,6 +706,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
 					     struct irq_domain *parent);
 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+			   struct msi_desc *desc);
 #else /* CONFIG_PCI_MSI */
 static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
 {
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index bfae59202669..6fe6cbcd512d 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -388,7 +388,7 @@ struct gdma_context {
 	unsigned int		max_num_queues;
 	unsigned int		max_num_msix;
 	unsigned int		num_msix_usable;
-	struct gdma_irq_context	*irq_contexts;
+	struct xarray		irq_contexts;
 
 	/* L2 MTU */
 	u16 adapter_mtu;
@@ -580,6 +580,9 @@ enum {
 /* Driver can handle holes (zeros) in the device list */
 #define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
 
+/* Driver supports dynamic MSI-X vector allocation */
+#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
+
 /* Driver can self reset on FPGA Reconfig EQE notification */
 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
 
@@ -589,6 +592,7 @@ enum {
 	GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
 	GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
 	GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
+	GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
 	GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)
 
 #define GDMA_DRV_CAP_FLAGS2 0