@@ -26,10 +26,8 @@ static inline void set_cede_latency_hint(u8 latency_hint)
get_lppaca()->cede_latency_hint = latency_hint;
}
-static inline long cede_processor(void)
-{
- return plpar_hcall_norets(H_CEDE);
-}
+int cpu_is_ceded(int cpu);
+long cede_processor(void);
static inline long extended_cede_processor(unsigned long latency_hint)
{
@@ -44,6 +44,7 @@
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>
+#include <asm/plpar_wrappers.h>
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
@@ -991,6 +992,11 @@ int rtas_ibm_suspend_me(u64 handle)
goto out_hotplug_enable;
}
+ for_each_present_cpu(cpu) {
+ if (cpu_is_ceded(cpu))
+ plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+ }
+
/* Call function on all CPUs. One of us will make the
* rtas call
*/
@@ -331,6 +331,24 @@ static int alloc_dispatch_log_kmem_cache(void)
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
+static DEFINE_PER_CPU(int, cpu_ceded);
+
+int cpu_is_ceded(int cpu) /* nonzero while @cpu is inside cede_processor()'s H_CEDE */
+{
+ return per_cpu(cpu_ceded, cpu); /* plain per-cpu read; set/cleared by cede_processor() */
+}
+
+long cede_processor(void) /* cede this CPU to the hypervisor, flagging it as ceded for the duration */
+{
+ long rc;
+
+ per_cpu(cpu_ceded, raw_smp_processor_id()) = 1; /* advertise cede so suspend can H_PROD us */
+ rc = plpar_hcall_norets(H_CEDE); /* NOTE(review): no barrier around these stores vs. the remote cpu_is_ceded() read — confirm ordering is safe */
+ per_cpu(cpu_ceded, raw_smp_processor_id()) = 0; /* clear on wakeup; raw_: preemption state unchanged across the hcall */
+
+ return rc; /* hypervisor return code from H_CEDE, propagated unmodified */
+}
+
static void pseries_lpar_idle(void)
{
/*