@@ -79,17 +79,6 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
inc_active_gangs(gang);
- /* If the gang is running, it needs to be stopped, since we have a
- * new context that needs to be gang scheduled. Gangs are allowed
- * to grow and shrink over time, but they are unscheduled when it
- * happens as the gang may need to migrated to a different node.
- */
- if (atomic_read(&gang->nstarted)) {
- mutex_lock(&gang->mutex);
- spu_deactivate(gang);
- mutex_unlock(&gang->mutex);
- }
-
spin_lock_init(&ctx->mmio_lock);
mutex_init(&ctx->mapping_lock);
kref_init(&ctx->kref);
@@ -141,14 +130,6 @@ void destroy_spu_context(struct kref *kref)
spu_context_nospu_trace(destroy_spu_context__enter, ctx);
- /*
- * Deactivate and make it non-runnable while we work on it.
- */
- mutex_lock(&gang->mutex);
- WARN_ON(ctx->gang != gang);
- spu_deactivate(gang);
- mutex_unlock(&gang->mutex);
-
spu_fini_csa(&ctx->csa);
spu_gang_remove_ctx(ctx->gang, ctx);
if (ctx->prof_priv_kref)
@@ -90,6 +90,15 @@ int put_spu_gang(struct spu_gang *gang)
void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx)
{
mutex_lock(&gang->mutex);
+ /*
+ * If the gang is running, it needs to be stopped, since we have a
+ * new context that needs to be gang scheduled. Gangs are allowed
+ * to grow and shrink over time, but they are unscheduled when it
+ * happens as the gang may need to be migrated to a different node.
+ */
+ if (atomic_read(&gang->nstarted))
+ spu_deactivate(gang);
+
ctx->gang = get_spu_gang(gang);
list_add(&ctx->gang_list, &gang->list);
gang->contexts++;
@@ -115,7 +124,9 @@ void update_gang_stats(struct spu_gang *gang, struct spu_context *ctx)
void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
{
mutex_lock(&gang->mutex);
+ /* Deactivate and make it non-runnable while we work on it. */
WARN_ON(ctx->gang != gang);
+ spu_deactivate(gang);
if (!list_empty(&ctx->aff_list)) {
list_del_init(&ctx->aff_list);
gang->aff_flags &= ~AFF_OFFSETS_SET;