--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -521,15 +521,10 @@ _GLOBAL(_switch)
#endif /* CONFIG_SMP */
/*
- * If we optimise away the clear of the reservation in system
- * calls because we know the CPU tracks the address of the
- * reservation, then we need to clear it here to cover the
- * case that the kernel context switch path has no larx
- * instructions.
+ * The kernel context switch path must contain a spin_lock, whose
+ * larx/stcx will clear any reservation held by the task being
+ * switched out.
*/
-BEGIN_FTR_SECTION
- ldarx r6,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
BEGIN_FTR_SECTION
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2875,6 +2875,12 @@ context_switch(struct rq *rq, struct task_struct *prev,
rq_unpin_lock(rq, rf);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+ /*
+ * Some architectures require that a spin lock is taken before
+ * _switch. The rq_lock satisfies this condition. See powerpc
+ * _switch for details.
+ */
+
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
barrier();
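
To illustrate why a spin_lock acquisition is enough to clear the reservation,
here is a minimal sketch of a test-and-set lock built from lwarx/stwcx. This is
not the kernel's arch_spin_lock() (the real implementation lives in the powerpc
spinlock headers and also issues an acquire barrier); the function name and
constraints below are made up for illustration.

/* Sketch only: a naive powerpc test-and-set lock, not the kernel's. */
static inline void toy_spin_lock(unsigned int *lock)
{
	unsigned int tmp;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"	/* load lock word, establish a new reservation */
"	cmpwi	0,%0,0\n"	/* lock already held? */
"	bne-	1b\n"		/* yes: spin */
"	stwcx.	%1,0,%2\n"	/* try to store 1; clears the reservation */
"	bne-	1b\n"		/* store failed: retry */
	: "=&r" (tmp)
	: "r" (1), "r" (lock)
	: "cr0", "memory");
	/* A real lock also needs an acquire barrier (e.g. isync) here. */
}

The lwarx replaces whatever reservation the outgoing task may have left behind,
and the stwcx. clears it whether or not it succeeds, which is exactly the
guarantee the new comment in _switch relies on.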
There is no need to break the reservation in _switch, because we are
guaranteed that the context switch path will include a larx/stcx.
Comment the guarantee and remove the reservation clear from _switch.

This is worth 1-2% in context switch performance.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kernel/entry_64.S | 11 +++--------
 kernel/sched/core.c            |  6 ++++++
 2 files changed, 9 insertions(+), 8 deletions(-)
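
The 1-2% figure can be sanity-checked with any context switch microbenchmark.
Below is a minimal sketch of the usual pipe ping-pong approach (illustrative
only, not part of the patch; the round count is arbitrary, and both processes
should be pinned to one CPU, e.g. with taskset, so each round trip really
forces two context switches).

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/wait.h>

#define ROUNDS 200000

int main(void)
{
	int p2c[2], c2p[2];
	char buf = 0;
	struct timespec start, end;

	if (pipe(p2c) || pipe(c2p)) {
		perror("pipe");
		return 1;
	}

	if (fork() == 0) {
		/* Child: echo each byte back to the parent. */
		for (int i = 0; i < ROUNDS; i++) {
			if (read(p2c[0], &buf, 1) != 1)
				exit(1);
			if (write(c2p[1], &buf, 1) != 1)
				exit(1);
		}
		exit(0);
	}

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (int i = 0; i < ROUNDS; i++) {
		if (write(p2c[1], &buf, 1) != 1 ||
		    read(c2p[0], &buf, 1) != 1)
			return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &end);
	wait(NULL);

	double ns = (end.tv_sec - start.tv_sec) * 1e9 +
		    (end.tv_nsec - start.tv_nsec);
	/* Each round trip is roughly two context switches. */
	printf("%.1f ns per switch\n", ns / (ROUNDS * 2.0));
	return 0;
}

Running it on the same machine with and without the patch gives a rough
per-switch delta to compare against the number quoted above.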