
powerpc/spufs: Change cbe_spu_info mutex_lock to spin_lock

Message ID: 200809112037.41236.adetsch@br.ibm.com
State: Superseded
Delegated to: Jeremy Kerr

Commit Message

Andre Detsch Sept. 11, 2008, 11:37 p.m. UTC
This structure groups the physical spus. The list_mutex must be changed
to a spinlock, because the runq_lock is a spinlock and a mutex cannot be
nested under a spinlock: taking a mutex may sleep, which is not allowed
while a spinlock is held. The lock for cbe_spu_info[] is taken under the
runq_lock, as multiple spus may need to be allocated to schedule a gang.
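
A minimal sketch (not part of this patch) of the nesting rule at issue:
spin_lock() disables preemption, while mutex_lock() may sleep, so the
first pattern below is a bug and the second, which this patch enables,
is legal.

/*
 * Illegal: the inner mutex_lock() may sleep while the outer
 * spinlock holds preemption off ("scheduling while atomic").
 */
spin_lock(&spu_prio->runq_lock);
mutex_lock(&cbe_spu_info[node].list_mutex);	/* BUG: may sleep */

/*
 * Legal after this patch: spinlocks nest, as neither side sleeps.
 */
spin_lock(&spu_prio->runq_lock);
spin_lock(&cbe_spu_info[node].list_lock);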

Change spu_bind_context() and spu_unbind_context() so that they are not
called under the new spinlock, as that would deadlock if they blocked on
higher-level allocations (mmap) that are protected by mutexes.
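
The resulting pattern, sketched below for illustration only
(pick_free_spu() is a hypothetical stand-in for the allocation done
under the lock): the spu is claimed while list_lock is held, and the
potentially sleeping bind runs only after the lock is dropped.

struct spu *spu;

spin_lock(&cbe_spu_info[node].list_lock);
spu = pick_free_spu(node);		/* hypothetical helper */
if (spu)
	spu->alloc_state = SPU_USED;	/* claim it under the lock */
spin_unlock(&cbe_spu_info[node].list_lock);

if (spu)
	spu_bind_context(spu, ctx);	/* may sleep; lock not held */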

Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Signed-off-by: Andre Detsch <adetsch@br.ibm.com>

Patch

diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 8b2eb04..9d799b6 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -187,7 +187,7 @@  struct spu {
 };
 
 struct cbe_spu_info {
-	struct mutex list_mutex;
+	spinlock_t list_lock;
 	struct list_head spus;
 	int n_spus;
 	int nr_active;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a5bdb89..b1a97a1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -650,10 +650,10 @@  static int __init create_spu(void *data)
 	if (ret)
 		goto out_free_irqs;
 
-	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
+	spin_lock(&cbe_spu_info[spu->node].list_lock);
 	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
 	cbe_spu_info[spu->node].n_spus++;
-	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
+	spin_unlock(&cbe_spu_info[spu->node].list_lock);
 
 	mutex_lock(&spu_full_list_mutex);
 	spin_lock_irqsave(&spu_full_list_lock, flags);
@@ -732,7 +732,7 @@  static int __init init_spu_base(void)
 	int i, ret = 0;
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
-		mutex_init(&cbe_spu_info[i].list_mutex);
+		spin_lock_init(&cbe_spu_info[i].list_lock);
 		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
 	}
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 897c740..386aa0a 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -153,11 +153,11 @@  void spu_update_sched_info(struct spu_context *ctx)
 		node = ctx->spu->node;
 
 		/*
-		 * Take list_mutex to sync with find_victim().
+		 * Take list_lock to sync with find_victim().
 		 */
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		__spu_update_sched_info(ctx);
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	} else {
 		__spu_update_sched_info(ctx);
 	}
@@ -179,9 +179,9 @@  static int node_allowed(struct spu_context *ctx, int node)
 {
 	int rval;
 
-	spin_lock(&spu_prio->runq_lock);
+	spin_lock(&cbe_spu_info[node].list_lock);
 	rval = __node_allowed(ctx, node);
-	spin_unlock(&spu_prio->runq_lock);
+	spin_unlock(&cbe_spu_info[node].list_lock);
 
 	return rval;
 }
@@ -199,7 +199,7 @@  void do_notify_spus_active(void)
 	for_each_online_node(node) {
 		struct spu *spu;
 
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if (spu->alloc_state != SPU_FREE) {
 				struct spu_context *ctx = spu->ctx;
@@ -209,7 +209,7 @@  void do_notify_spus_active(void)
 				wake_up_all(&ctx->stop_wq);
 			}
 		}
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	}
 }
 
@@ -233,7 +233,6 @@  static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_associate_mm(spu, ctx->owner);
 
 	spin_lock_irq(&spu->register_lock);
-	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;