Patchwork [7/11] Use stop machine to update cpu maps

login
register
mail settings
Submitter Nathan Fontenot
Date March 9, 2013, 4:05 a.m.
Message ID <513AB516.1070904@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/226304/
State Superseded, archived
Headers show

Comments

Nathan Fontenot - March 9, 2013, 4:05 a.m.
From: Jesse Larrew <jlarrew@linux.vnet.ibm.com>

The new PRRN firmware feature allows CPU and memory resources to be
transparently reassigned across NUMA boundaries. When this happens, the
kernel must update the node maps to reflect the new affinity
information.

Although the NUMA maps can be protected by locking primitives during the
update itself, this is insufficient to prevent concurrent accesses to these
structures. Since cpumask_of_node() hands out a pointer to these
structures, they can still be accessed outside of the lock. Furthermore,
tracking down each usage of these pointers and adding locks would be quite
invasive and difficult to maintain.

Situations like these are best handled using stop_machine(). Since the NUMA
affinity updates are exceptionally rare events, this approach has the
benefit of not adding any overhead while accessing the NUMA maps during
normal operation.

Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
---
 arch/powerpc/mm/numa.c |   51 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 35 insertions(+), 16 deletions(-)

Patch

Index: powerpc/arch/powerpc/mm/numa.c
===================================================================
--- powerpc.orig/arch/powerpc/mm/numa.c	2013-03-08 19:57:38.000000000 -0600
+++ powerpc/arch/powerpc/mm/numa.c	2013-03-08 19:57:47.000000000 -0600
@@ -22,6 +22,7 @@ 
 #include <linux/pfn.h>
 #include <linux/cpuset.h>
 #include <linux/node.h>
+#include <linux/stop_machine.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
@@ -1254,6 +1255,12 @@ 
 
 /* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
+struct topology_update_data {
+	int cpu;
+	int old_nid;
+	int new_nid;
+};
+
 static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
@@ -1405,34 +1412,46 @@ 
 }
 
 /*
+ * Update the CPU maps and sysfs entries for a single CPU when its NUMA
+ * characteristics change. This function doesn't perform any locking and is
+ * only safe to call from stop_machine().
+ */
+static int update_cpu_topology(void *data)
+{
+	struct topology_update_data *update = data;
+
+	if (!update)
+		return -EINVAL;
+
+	unregister_cpu_under_node(update->cpu, update->old_nid);
+	unmap_cpu_from_node(update->cpu);
+	map_cpu_to_node(update->cpu, update->new_nid);
+	register_cpu_under_node(update->cpu, update->new_nid);
+
+	return 0;
+}
+
+/*
  * Update the node maps and sysfs entries for each cpu whose home node
  * has changed. Returns 1 when the topology has changed, and 0 otherwise.
  */
 int arch_update_cpu_topology(void)
 {
-	int cpu, nid, old_nid, changed = 0;
+	int cpu, changed = 0;
+	struct topology_update_data update;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	struct device *dev;
 
 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
+		update.cpu = cpu;
 		vphn_get_associativity(cpu, associativity);
-		nid = associativity_to_nid(associativity);
-
-		if (nid < 0 || !node_online(nid))
-			nid = first_online_node;
+		update.new_nid = associativity_to_nid(associativity);
 
-		old_nid = numa_cpu_lookup_table[cpu];
-
-		/* Disable hotplug while we update the cpu
-		 * masks and sysfs.
-		 */
-		get_online_cpus();
-		unregister_cpu_under_node(cpu, old_nid);
-		unmap_cpu_from_node(cpu);
-		map_cpu_to_node(cpu, nid);
-		register_cpu_under_node(cpu, nid);
-		put_online_cpus();
+		if (update.new_nid < 0 || !node_online(update.new_nid))
+			update.new_nid = first_online_node;
 
+		update.old_nid = numa_cpu_lookup_table[cpu];
+		stop_machine(update_cpu_topology, &update, cpu_online_mask);
 		dev = get_cpu_device(cpu);
 		if (dev)
 			kobject_uevent(&dev->kobj, KOBJ_CHANGE);