Patchwork [01/18] powerpc/spufs: Change runq_lock to a mutex

login
register
mail settings
Submitter Andre Detsch
Date Dec. 10, 2008, 7:34 p.m.
Message ID <200812101734.09215.adetsch@br.ibm.com>
Download mbox | patch
Permalink /patch/13302/
State RFC
Headers show

Comments

Andre Detsch - Dec. 10, 2008, 7:34 p.m.
We'll need to nest runq_lock with the context state mutex, so
change runq_lock itself into a mutex.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
---
 arch/powerpc/platforms/cell/spufs/sched.c |   28 ++++++++++++++--------------
 1 files changed, 14 insertions(+), 14 deletions(-)

Patch

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 142e9c2..8ebbf05 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -51,7 +51,7 @@ 
 struct spu_prio_array {
 	DECLARE_BITMAP(bitmap, MAX_PRIO);
 	struct list_head runq[MAX_PRIO];
-	spinlock_t runq_lock;
+	struct mutex runq_lock;
 	int nr_waiting;
 };
 
@@ -179,9 +179,9 @@  static int node_allowed(struct spu_context *ctx, int node)
 {
 	int rval;
 
-	spin_lock(&spu_prio->runq_lock);
+	mutex_lock(&spu_prio->runq_lock);
 	rval = __node_allowed(ctx, node);
-	spin_unlock(&spu_prio->runq_lock);
+	mutex_unlock(&spu_prio->runq_lock);
 
 	return rval;
 }
@@ -514,9 +514,9 @@  static void __spu_add_to_rq(struct spu_context *ctx)
 
 static void spu_add_to_rq(struct spu_context *ctx)
 {
-	spin_lock(&spu_prio->runq_lock);
+	mutex_lock(&spu_prio->runq_lock);
 	__spu_add_to_rq(ctx);
-	spin_unlock(&spu_prio->runq_lock);
+	mutex_unlock(&spu_prio->runq_lock);
 }
 
 static void __spu_del_from_rq(struct spu_context *ctx)
@@ -535,9 +535,9 @@  static void __spu_del_from_rq(struct spu_context *ctx)
 
 void spu_del_from_rq(struct spu_context *ctx)
 {
-	spin_lock(&spu_prio->runq_lock);
+	mutex_lock(&spu_prio->runq_lock);
 	__spu_del_from_rq(ctx);
-	spin_unlock(&spu_prio->runq_lock);
+	mutex_unlock(&spu_prio->runq_lock);
 }
 
 static void spu_prio_wait(struct spu_context *ctx)
@@ -551,18 +551,18 @@  static void spu_prio_wait(struct spu_context *ctx)
 	 */
 	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
 
-	spin_lock(&spu_prio->runq_lock);
+	mutex_lock(&spu_prio->runq_lock);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
 		__spu_add_to_rq(ctx);
-		spin_unlock(&spu_prio->runq_lock);
+		mutex_unlock(&spu_prio->runq_lock);
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
 		mutex_lock(&ctx->state_mutex);
-		spin_lock(&spu_prio->runq_lock);
+		mutex_lock(&spu_prio->runq_lock);
 		__spu_del_from_rq(ctx);
 	}
-	spin_unlock(&spu_prio->runq_lock);
+	mutex_unlock(&spu_prio->runq_lock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
@@ -838,7 +838,7 @@  static struct spu_context *grab_runnable_context(int prio, int node)
 	struct spu_context *ctx;
 	int best;
 
-	spin_lock(&spu_prio->runq_lock);
+	mutex_lock(&spu_prio->runq_lock);
 	best = find_first_bit(spu_prio->bitmap, prio);
 	while (best < prio) {
 		struct list_head *rq = &spu_prio->runq[best];
@@ -854,7 +854,7 @@  static struct spu_context *grab_runnable_context(int prio, int node)
 	}
 	ctx = NULL;
  found:
-	spin_unlock(&spu_prio->runq_lock);
+	mutex_unlock(&spu_prio->runq_lock);
 	return ctx;
 }
 
@@ -1122,7 +1122,7 @@  int __init spu_sched_init(void)
 		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
 	}
-	spin_lock_init(&spu_prio->runq_lock);
+	mutex_init(&spu_prio->runq_lock);
 
 	setup_timer(&spusched_timer, spusched_wake, 0);
 	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);