
[3/5] lock: Move code around

Message ID: 20180815051039.7019-3-benh@kernel.crashing.org
State: Accepted
Series: [1/5] phb4: Workaround PHB errata with CFG write UR/CA errors

Checks

Context: snowpatch_ozlabs/apply_patch
Check: success
Description: master/apply_patch Successfully applied

Commit Message

Benjamin Herrenschmidt Aug. 15, 2018, 5:10 a.m. UTC
This moves __try_lock() and lock_timeout() in preparation for the
next patch. No code change.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
 core/lock.c | 78 ++++++++++++++++++++++++++---------------------------
 1 file changed, 39 insertions(+), 39 deletions(-)
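
For context, the two helpers being moved work as follows: __try_lock() claims the lock with a single compare-and-swap that writes the caller's PIR into the upper 32 bits of the lock word, with bit 0 as the held flag, and lock_timeout() warns and prints a backtrace once a lock has been spinning for more than LOCK_TIMEOUT_MS (unless the timebase is invalid). Below is a minimal, standalone sketch of that pattern using C11 atomics and clock_gettime(); every demo_* name is hypothetical and this is not skiboot code (skiboot uses __cmpxchg64(), the PIR register and the timebase instead).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Lock word layout borrowed from the patch: owner id in the top 32 bits,
 * bit 0 set while the lock is held. */
struct demo_lock {
	_Atomic uint64_t lock_val;
};

/* Hypothetical stand-in for the per-CPU PIR. */
static uint32_t demo_cpu_id(void)
{
	return 7;
}

static uint64_t demo_msecs_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Single compare-and-swap from 0 to (owner << 32 | 1); the default
 * sequentially consistent ordering stands in for barrier()/sync(). */
static bool demo_try_lock(struct demo_lock *l)
{
	uint64_t expected = 0;
	uint64_t val = ((uint64_t)demo_cpu_id() << 32) | 1;

	return atomic_compare_exchange_strong(&l->lock_val, &expected, val);
}

#define DEMO_LOCK_TIMEOUT_MS 5000

/* Report when we have been spinning for longer than the timeout. */
static bool demo_lock_timeout(uint64_t start_ms)
{
	uint64_t waited = demo_msecs_now() - start_ms;

	if (waited > DEMO_LOCK_TIMEOUT_MS) {
		fprintf(stderr, "WARNING: lock spinning for %llums\n",
			(unsigned long long)waited);
		return true;
	}
	return false;
}

/* Acquisition loop: spin on the try-lock, complaining if it drags on. */
static void demo_lock(struct demo_lock *l)
{
	uint64_t start = demo_msecs_now();

	while (!demo_try_lock(l))
		demo_lock_timeout(start);
}

static void demo_unlock(struct demo_lock *l)
{
	atomic_store(&l->lock_val, 0);
}

int main(void)
{
	struct demo_lock l = { 0 };

	demo_lock(&l);
	/* critical section */
	demo_unlock(&l);
	return 0;
}

Keeping the owner's identity in the lock word is what lets lock_held_by_me(), visible in the third hunk below, recognise the holder with a single 64-bit compare.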

Patch

diff --git a/core/lock.c b/core/lock.c
index c320f2c3..1fc71a92 100644
--- a/core/lock.c
+++ b/core/lock.c
@@ -63,6 +63,45 @@  static void unlock_check(struct lock *l)
 		lock_error(l, "Releasing lock we don't hold depth", 4);
 }
 
+static inline bool __nomcount __try_lock(struct cpu_thread *cpu, struct lock *l)
+{
+	uint64_t val;
+
+	val = cpu->pir;
+	val <<= 32;
+	val |= 1;
+
+	barrier();
+	if (__cmpxchg64(&l->lock_val, 0, val) == 0) {
+		sync();
+		return true;
+	}
+	return false;
+}
+
+#define LOCK_TIMEOUT_MS 5000
+static inline bool lock_timeout(unsigned long start)
+{
+	/* Print warning if lock has been spinning for more than TIMEOUT_MS */
+	unsigned long wait = tb_to_msecs(mftb());
+
+	if (wait - start > LOCK_TIMEOUT_MS) {
+		/*
+		 * If the timebase is invalid, we shouldn't
+		 * throw an error. This is possible with pending HMIs
+		 * that need to recover TB.
+		 */
+		if( !(mfspr(SPR_TFMR) & SPR_TFMR_TB_VALID))
+			return false;
+		prlog(PR_WARNING, "WARNING: Lock has been "\
+		      "spinning for %lums\n", wait - start);
+		backtrace();
+		return true;
+	}
+
+	return false;
+}
+
 /* Find circular dependencies in the lock requests. */
 static bool check_deadlock(void)
 {
@@ -132,29 +171,6 @@  static void remove_lock_request(void)
 {
 	this_cpu()->requested_lock = NULL;
 }
-
-#define LOCK_TIMEOUT_MS 5000
-static inline bool lock_timeout(unsigned long start)
-{
-	/* Print warning if lock has been spinning for more than TIMEOUT_MS */
-	unsigned long wait = tb_to_msecs(mftb());
-
-	if (wait - start > LOCK_TIMEOUT_MS) {
-		/*
-		 * If the timebase is invalid, we shouldn't
-		 * throw an error. This is possible with pending HMIs
-		 * that need to recover TB.
-		 */
-		if( !(mfspr(SPR_TFMR) & SPR_TFMR_TB_VALID))
-			return false;
-		prlog(PR_WARNING, "WARNING: Lock has been "\
-		      "spinning for %lums\n", wait - start);
-		backtrace();
-		return true;
-	}
-
-	return false;
-}
 #else
 static inline void lock_check(struct lock *l) { };
 static inline void unlock_check(struct lock *l) { };
@@ -170,22 +186,6 @@  bool lock_held_by_me(struct lock *l)
 	return l->lock_val == ((pir64 << 32) | 1);
 }
 
-static inline bool __try_lock(struct cpu_thread *cpu, struct lock *l)
-{
-	uint64_t val;
-
-	val = cpu->pir;
-	val <<= 32;
-	val |= 1;
-
-	barrier();
-	if (__cmpxchg64(&l->lock_val, 0, val) == 0) {
-		sync();
-		return true;
-	}
-	return false;
-}
-
 bool try_lock_caller(struct lock *l, const char *owner)
 {
 	struct cpu_thread *cpu = this_cpu();