diff mbox series

[v3,9/9] powerpc/xive: Modernize XIVE-IPI domain with an 'alloc' handler

Message ID 20210331144514.892250-10-clg@kaod.org (mailing list archive)
State Accepted
Headers show
Series powerpc/xive: Map one IPI interrupt per node | expand

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch powerpc/merge (87d76f542a24ecfa797e9bd3bb56c0f19aabff57)
snowpatch_ozlabs/build-ppc64le success Build succeeded
snowpatch_ozlabs/build-ppc64be success Build succeeded
snowpatch_ozlabs/build-ppc64e success Build succeeded
snowpatch_ozlabs/build-pmac32 success Build succeeded
snowpatch_ozlabs/checkpatch success total: 0 errors, 0 warnings, 0 checks, 50 lines checked
snowpatch_ozlabs/needsstable success Patch has no Fixes tags

Commit Message

Cédric Le Goater March 31, 2021, 2:45 p.m. UTC
Instead of calling irq_create_mapping() to map the IPI for a node,
introduce an 'alloc' handler. This is usually an extension to support
hierarchical irq_domains, which is not exactly the case for the
XIVE-IPI domain. However, we can now use the irq_domain_alloc_irqs()
routine, which allocates the IRQ descriptor on the specified node --
even better for cache performance on multi-node machines.

Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
---

 I didn't rerun the benchmark to check for a difference.
 
 arch/powerpc/sysdev/xive/common.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 06f29cd07448..bb7435ffe99c 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1103,15 +1103,26 @@  static struct irq_chip xive_ipi_chip = {
  * IPIs are marked per-cpu. We use separate HW interrupts under the
  * hood but associated with the same "linux" interrupt
  */
-static int xive_ipi_irq_domain_map(struct irq_domain *h, unsigned int virq,
-				   irq_hw_number_t hw)
+struct xive_ipi_alloc_info {
+	irq_hw_number_t hwirq;
+};
+
+static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				     unsigned int nr_irqs, void *arg)
 {
-	irq_set_chip_and_handler(virq, &xive_ipi_chip, handle_percpu_irq);
+	struct xive_ipi_alloc_info *info = arg;
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
+				    domain->host_data, handle_percpu_irq,
+				    NULL, NULL);
+	}
 	return 0;
 }
 
 static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
-	.map = xive_ipi_irq_domain_map,
+	.alloc  = xive_ipi_irq_domain_alloc,
 };
 
 static int __init xive_request_ipi(void)
@@ -1136,7 +1147,7 @@  static int __init xive_request_ipi(void)
 
 	for_each_node(node) {
 		struct xive_ipi_desc *xid = &xive_ipis[node];
-		irq_hw_number_t ipi_hwirq = node;
+		struct xive_ipi_alloc_info info = { node };
 
 		/* Skip nodes without CPUs */
 		if (cpumask_empty(cpumask_of_node(node)))
@@ -1147,9 +1158,9 @@  static int __init xive_request_ipi(void)
 		 * Since the HW interrupt number doesn't have any meaning,
 		 * simply use the node number.
 		 */
-		xid->irq = irq_create_mapping(ipi_domain, ipi_hwirq);
-		if (!xid->irq) {
-			ret = -EINVAL;
+		xid->irq = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
+		if (xid->irq < 0) {
+			ret = xid->irq;
 			goto out_free_xive_ipis;
 		}