powerpc/pseries: Check for ceded CPUs during LPAR migration
diff mbox series

Message ID 3557739b-42eb-58b8-4cc4-5633198469e7@linux.vnet.ibm.com
State New
Headers show
Series
  • powerpc/pseries: Check for ceded CPUs during LPAR migration
Related show

Checks

Context Check Description
snowpatch_ozlabs/checkpatch warning total: 0 errors, 1 warnings, 0 checks, 54 lines checked
snowpatch_ozlabs/build-pmac32 success build succeeded & removed 0 sparse warning(s)
snowpatch_ozlabs/build-ppc64e success build succeeded & removed 0 sparse warning(s)
snowpatch_ozlabs/build-ppc64be fail build failed!
snowpatch_ozlabs/build-ppc64le fail build failed!
snowpatch_ozlabs/apply_patch success next/apply_patch Successfully applied

Commit Message

Michael Bringmann Jan. 23, 2019, 8:40 p.m. UTC
This patch checks for ceded CPUs during LPM.  Some extreme
tests encountered a problem where Linux has put some threads to
sleep (possibly to save energy or something), LPM was attempted,
and the Linux kernel didn't awaken the sleeping threads, but issued
the H_JOIN for the active threads.  Since the sleeping threads
are not awake, they can not issue the expected H_JOIN, and the
partition would never suspend.  This patch wakes the sleeping
threads back up.

Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Signed-off-by: Gustavo Walbon <gwalbon@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/plpar_wrappers.h |    6 ++----
 arch/powerpc/kernel/rtas.c                |    6 ++++++
 arch/powerpc/platforms/pseries/setup.c    |   18 ++++++++++++++++++
 3 files changed, 26 insertions(+), 4 deletions(-)

Patch
diff mbox series

diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index cff5a41..8292eff 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -26,10 +26,8 @@  static inline void set_cede_latency_hint(u8 latency_hint)
 	get_lppaca()->cede_latency_hint = latency_hint;
 }
 
-static inline long cede_processor(void)
-{
-	return plpar_hcall_norets(H_CEDE);
-}
+int cpu_is_ceded(int cpu);
+long cede_processor(void);
 
 static inline long extended_cede_processor(unsigned long latency_hint)
 {
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index de35bd8f..9d9d08d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -44,6 +44,7 @@ 
 #include <asm/time.h>
 #include <asm/mmu.h>
 #include <asm/topology.h>
+#include <asm/plpar_wrappers.h>
 
 /* This is here deliberately so it's only used in this file */
 void enter_rtas(unsigned long);
@@ -991,6 +992,11 @@  int rtas_ibm_suspend_me(u64 handle)
 		goto out_hotplug_enable;
 	}
 
+	for_each_present_cpu(cpu) {
+		if (cpu_is_ceded(cpu))
+			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+	}
+
 	/* Call function on all CPUs.  One of us will make the
 	 * rtas call
 	 */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 4078a05..0106668 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -331,6 +331,24 @@  static int alloc_dispatch_log_kmem_cache(void)
 }
 machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
 
+static DEFINE_PER_CPU(int, cpu_ceded);
+
+int cpu_is_ceded(int cpu)
+{
+	return per_cpu(cpu_ceded, cpu);
+}
+
+long cede_processor(void)
+{
+	long rc;
+
+	per_cpu(cpu_ceded, raw_smp_processor_id()) = 1;
+	rc = plpar_hcall_norets(H_CEDE);
+	per_cpu(cpu_ceded, raw_smp_processor_id()) = 0;
+
+	return rc;
+}
+
 static void pseries_lpar_idle(void)
 {
 	/*