spu->dar = 0;
 }
 
-static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
+int spu_stopped(struct spu_context *ctx, u32 *stat)
 {
-       struct spu *spu;
-       u64 pte_fault;
+       u64 dsisr;
+       u32 stopped;
 
        *stat = ctx->ops->status_read(ctx);
 
-       spu = ctx->spu;
-       if (ctx->state != SPU_STATE_RUNNABLE ||
-           test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+       if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+               return 1;
+
+       stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
+               SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
+       if (*stat & stopped)
+               return 1;
+
+       dsisr = ctx->csa.dsisr;
+       if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
                return 1;
-       pte_fault = ctx->csa.dsisr &
-           (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-       return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
-               1 : 0;
+
+       if (ctx->csa.class_0_pending)
+               return 1;
+
+       return 0;
 }
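
[Note: spu_stopped() is made non-static so the scheduler tick below can share the same stop predicate as spufs_run_spu(). For reference, the typical caller pattern, using the existing spufs_wait() helper from spufs.h:]

	/* Sketch of the caller side: sleep until the context reports
	 * any of the stop conditions folded into spu_stopped(). */
	u32 status;
	int ret;

	ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));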
 
 static int spu_setup_isolated(struct spu_context *ctx)
 
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
-       unsigned long runcntl;
+       unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
        int ret;
 
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
-       if (ctx->flags & SPU_CREATE_ISOLATE) {
-               /*
-                * Force activation of spu.  Isolated state assumes that
-                * special loader context is loaded and running on spu.
-                */
+       /*
+        * NOSCHED is synchronous scheduling with respect to the caller.
+        * The caller waits for the context to be loaded.
+        */
+       if (ctx->flags & SPU_CREATE_NOSCHED) {
                if (ctx->state == SPU_STATE_SAVED) {
-                       spu_set_timeslice(ctx);
-
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                }
+       }
 
+       /*
+        * Apply special setup as required.
+        */
+       if (ctx->flags & SPU_CREATE_ISOLATE) {
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
 			ret = spu_setup_isolated(ctx);
 			if (ret)
 				return ret;
 		}
 
 		/* If userspace has set runcntl (eg, to issue an isolated
 		 * exit), re-set it here. */
 		runcntl = ctx->ops->runcntl_read(ctx) &
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+       }
 
+       if (ctx->flags & SPU_CREATE_NOSCHED) {
                spuctx_switch_state(ctx, SPU_UTIL_USER);
                ctx->ops->runcntl_write(ctx, runcntl);
-
        } else {
                unsigned long privcntl;
 
 		if (test_thread_flag(TIF_SINGLESTEP))
 			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
                else
                        privcntl = SPU_PRIVCNTL_MODE_NORMAL;
-               runcntl = SPU_RUNCNTL_RUNNABLE;
 
                ctx->ops->npc_write(ctx, *npc);
                ctx->ops->privcntl_write(ctx, privcntl);
+               ctx->ops->runcntl_write(ctx, runcntl);
 
                if (ctx->state == SPU_STATE_SAVED) {
-                       spu_set_timeslice(ctx);
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
+               } else {
+                       spuctx_switch_state(ctx, SPU_UTIL_USER);
                }
-
-               spuctx_switch_state(ctx, SPU_UTIL_USER);
-               ctx->ops->runcntl_write(ctx, runcntl);
        }
 
 	return 0;
 }
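
[Note: with asynchronous scheduling, spu_activate() returning 0 on the non-NOSCHED path no longer means the context is loaded. A hypothetical sketch of how a caller could synchronize on ctx->run_wq, which __spu_schedule() below wakes after a successful bind; the wait predicate is an assumption, not code from this patch:]

	ret = spu_activate(ctx, 0);
	if (!ret && !(ctx->flags & SPU_CREATE_NOSCHED))
		ret = spufs_wait(ctx->run_wq,
				 ctx->state == SPU_STATE_RUNNABLE);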

static int spu_run_fini(struct spu_context *ctx, u32 *npc, u32 *status)
 {
        int ret = 0;
 
+       spu_del_from_rq(ctx);
+
        *status = ctx->ops->status_read(ctx);
        *npc = ctx->ops->npc_read(ctx);
 
        return ret;
 }
 
-static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
-                                        u32 *status)
-{
-       int ret;
-
-       ret = spu_run_fini(ctx, npc, status);
-       if (ret)
-               return ret;
-
-       if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
-               return *status;
-
-       ret = spu_acquire_runnable(ctx, 0);
-       if (ret)
-               return ret;
-
-       spuctx_switch_state(ctx, SPU_UTIL_USER);
-       return 0;
-}
-
 /*
  * SPU syscall restarting is tricky because we violate the basic
  * assumption that the signal handler is running on the interrupted
  * thread.  Here instead, the handler runs on PowerPC user space code.
  */
 
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
                if (ret)
                        break;
 
-               if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
-                       ret = spu_reacquire_runnable(ctx, npc, &status);
-                       if (ret)
-                               goto out2;
-                       continue;
-               }
-
                if (signal_pending(current))
                        ret = -ERESTARTSYS;
-
-
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT |
                                       SPU_STATUS_SINGLE_STEP)));
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);
 
-out2:
        if ((ret == 0) ||
            ((ret == -ERESTARTSYS) &&
 	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
 	      (status & SPU_STATUS_SINGLE_STEP) ||
 	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
 	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
 		ret = status;
 
void spu_update_sched_info(struct spu_context *ctx)
 {
 	int node;
 
        if (ctx->state == SPU_STATE_RUNNABLE) {
                node = ctx->spu->node;
+
+               /*
+                * Take list_mutex to sync with find_victim().
+                */
                mutex_lock(&cbe_spu_info[node].list_mutex);
                __spu_update_sched_info(ctx);
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
 }
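
[Note: the list_mutex taken here pairs with the reader side in find_victim() further down, which samples spu->ctx and its priority under the same lock; condensed from the find_victim() hunk:]

	mutex_lock(&cbe_spu_info[node].list_mutex);
	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
		struct spu_context *tmp = spu->ctx;

		if (tmp && tmp->prio > ctx->prio)
			victim = tmp;	/* candidate to preempt */
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);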
 
+static void spu_add_to_rq(struct spu_context *ctx)
+{
+       spin_lock(&spu_prio->runq_lock);
+       __spu_add_to_rq(ctx);
+       spin_unlock(&spu_prio->runq_lock);
+}
+
 static void __spu_del_from_rq(struct spu_context *ctx)
 {
 	int prio = ctx->prio;
 
 	if (!list_empty(&ctx->rq)) {
 		list_del_init(&ctx->rq);
 		if (list_empty(&spu_prio->runq[prio]))
 			clear_bit(prio, spu_prio->bitmap);
 	}
 }
 
+void spu_del_from_rq(struct spu_context *ctx)
+{
+       spin_lock(&spu_prio->runq_lock);
+       __spu_del_from_rq(ctx);
+       spin_unlock(&spu_prio->runq_lock);
+}
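
[Note: both wrappers take the runq_lock spinlock, so they are safe to call with state_mutex held. A sketch of the priority run queue they manipulate, assumed to match the existing spu_prio_array in sched.c:]

	struct spu_prio_array {
		DECLARE_BITMAP(bitmap, MAX_PRIO);  /* one bit per non-empty level */
		struct list_head runq[MAX_PRIO];   /* FIFO of waiting contexts */
		spinlock_t runq_lock;              /* guards bitmap and lists */
	};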
+
 static void spu_prio_wait(struct spu_context *ctx)
 {
        DEFINE_WAIT(wait);
 
+       /*
+        * The caller must explicitly wait for a context to be loaded
+        * if the nosched flag is set.  If NOSCHED is not set, the caller
+        * queues the context and waits for an spu event or error.
+        */
+       BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
+
        spin_lock(&spu_prio->runq_lock);
        prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
 
static struct spu *find_victim(struct spu_context *ctx)
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        struct spu_context *tmp = spu->ctx;
 
                        if (tmp && tmp->prio > ctx->prio &&
+                           !(tmp->flags & SPU_CREATE_NOSCHED) &&
                            (!victim || tmp->prio > victim->prio))
                                victim = spu->ctx;
                }
 
                        victim->stats.invol_ctx_switch++;
                        spu->stats.invol_ctx_switch++;
+                       spu_add_to_rq(victim);
+
                        mutex_unlock(&victim->state_mutex);
-                       /*
-                        * We need to break out of the wait loop in spu_run
-                        * manually to ensure this context gets put on the
-                        * runqueue again ASAP.
-                        */
-                       wake_up(&victim->stop_wq);
+
                        return spu;
                }
        }
        return NULL;
 }
 
+static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+       int node = spu->node;
+       int success = 0;
+
+       spu_set_timeslice(ctx);
+
+       mutex_lock(&cbe_spu_info[node].list_mutex);
+       if (spu->ctx == NULL) {
+               spu_bind_context(spu, ctx);
+               cbe_spu_info[node].nr_active++;
+               spu->alloc_state = SPU_USED;
+               success = 1;
+       }
+       mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+       if (success)
+               wake_up_all(&ctx->run_wq);
+       else
+               spu_add_to_rq(ctx);
+}
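
[Note: the success flag handles two contexts racing for the same idle spu; only the first to take list_mutex sees spu->ctx == NULL. Illustration only, ctx_a/ctx_b are hypothetical:]

	spu_schedule(spu, ctx_a);	/* binds, wakes ctx_a->run_wq */
	spu_schedule(spu, ctx_b);	/* spu->ctx != NULL, so ctx_b is
					 * requeued via spu_add_to_rq()
					 * rather than lost */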
+
+static void spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+       spu_acquire(ctx);
+       __spu_schedule(spu, ctx);
+       spu_release(ctx);
+}
+
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+{
+       int node = spu->node;
+
+       mutex_lock(&cbe_spu_info[node].list_mutex);
+       cbe_spu_info[node].nr_active--;
+       spu->alloc_state = SPU_FREE;
+       spu_unbind_context(spu, ctx);
+       ctx->stats.invol_ctx_switch++;
+       spu->stats.invol_ctx_switch++;
+       mutex_unlock(&cbe_spu_info[node].list_mutex);
+}
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx:       spu context to schedule
  * @flags:     flags (currently ignored)
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-       do {
-               struct spu *spu;
+       struct spu *spu;
 
-               /*
-                * If there are multiple threads waiting for a single context
-                * only one actually binds the context while the others will
-                * only be able to acquire the state_mutex once the context
-                * already is in runnable state.
-                */
-               if (ctx->spu)
-                       return 0;
+       /*
+        * If there are multiple threads waiting for a single context
+        * only one actually binds the context while the others will
+        * only be able to acquire the state_mutex once the context
+        * already is in runnable state.
+        */
+       if (ctx->spu)
+               return 0;
 
-               spu = spu_get_idle(ctx);
-               /*
-                * If this is a realtime thread we try to get it running by
-                * preempting a lower priority thread.
-                */
-               if (!spu && rt_prio(ctx->prio))
-                       spu = find_victim(ctx);
-               if (spu) {
-                       int node = spu->node;
+spu_activate_top:
+       if (signal_pending(current))
+               return -ERESTARTSYS;
 
-                       mutex_lock(&cbe_spu_info[node].list_mutex);
-                       spu_bind_context(spu, ctx);
-                       cbe_spu_info[node].nr_active++;
-                       mutex_unlock(&cbe_spu_info[node].list_mutex);
-                       wake_up_all(&ctx->run_wq);
-                       return 0;
-               }
+       spu = spu_get_idle(ctx);
+       /*
+        * If this is a realtime thread we try to get it running by
+        * preempting a lower priority thread.
+        */
+       if (!spu && rt_prio(ctx->prio))
+               spu = find_victim(ctx);
+       if (spu) {
+               unsigned long runcntl;
 
+               runcntl = ctx->ops->runcntl_read(ctx);
+               __spu_schedule(spu, ctx);
+               if (runcntl & SPU_RUNCNTL_RUNNABLE)
+                       spuctx_switch_state(ctx, SPU_UTIL_USER);
+
+               return 0;
+       }
+
+       if (ctx->flags & SPU_CREATE_NOSCHED) {
                spu_prio_wait(ctx);
-       } while (!signal_pending(current));
+               goto spu_activate_top;
+       }
 
-       return -ERESTARTSYS;
+       spu_add_to_rq(ctx);
+
+       return 0;
 }
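
[Note: summarizing the control flow above; illustrative comment, not new code:]

	/*
	 * spu_activate() outcomes after this patch:
	 *
	 *   idle spu found        -> __spu_schedule(), return 0
	 *   none, rt_prio(ctx)    -> find_victim() may preempt a lower
	 *                            priority context first
	 *   none, NOSCHED context -> spu_prio_wait(), then retry
	 *   none, otherwise       -> spu_add_to_rq(), return 0 (the
	 *                            load happens later from
	 *                            spu_schedule())
	 */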
 
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
        if (spu) {
                new = grab_runnable_context(max_prio, spu->node);
                if (new || force) {
-                       int node = spu->node;
-
-                       mutex_lock(&cbe_spu_info[node].list_mutex);
-                       spu_unbind_context(spu, ctx);
-                       spu->alloc_state = SPU_FREE;
-                       cbe_spu_info[node].nr_active--;
-                       mutex_unlock(&cbe_spu_info[node].list_mutex);
-
-                       ctx->stats.vol_ctx_switch++;
-                       spu->stats.vol_ctx_switch++;
-
-                       if (new)
-                               wake_up(&new->stop_wq);
+                       spu_unschedule(spu, ctx);
+                       if (new) {
+                               if (new->flags & SPU_CREATE_NOSCHED)
+                                       wake_up(&new->stop_wq);
+                               else {
+                                       spu_release(ctx);
+                                       spu_schedule(spu, new);
+                                       spu_acquire(ctx);
+                               }
+                       }
                }
-
        }
 
 	return new != NULL;
 }
 
 static noinline void spusched_tick(struct spu_context *ctx)
 {
+       struct spu_context *new = NULL;
+       struct spu *spu = NULL;
+       u32 status;
+
+       spu_acquire(ctx);
+
+       if (ctx->state != SPU_STATE_RUNNABLE)
+               goto out;
+       if (spu_stopped(ctx, &status))
+               goto out;
        if (ctx->flags & SPU_CREATE_NOSCHED)
-               return;
+               goto out;
        if (ctx->policy == SCHED_FIFO)
-               return;
+               goto out;
 
        if (--ctx->time_slice)
-               return;
+               goto out;
 
-       /*
-        * Unfortunately list_mutex ranks outside of state_mutex, so
-        * we have to trylock here.  If we fail give the context another
-        * tick and try again.
-        */
-       if (mutex_trylock(&ctx->state_mutex)) {
-               struct spu *spu = ctx->spu;
-               struct spu_context *new;
-
-               new = grab_runnable_context(ctx->prio + 1, spu->node);
-               if (new) {
-                       spu_unbind_context(spu, ctx);
-                       ctx->stats.invol_ctx_switch++;
-                       spu->stats.invol_ctx_switch++;
-                       spu->alloc_state = SPU_FREE;
-                       cbe_spu_info[spu->node].nr_active--;
-                       wake_up(&new->stop_wq);
-                       /*
-                        * We need to break out of the wait loop in
-                        * spu_run manually to ensure this context
-                        * gets put on the runqueue again ASAP.
-                        */
-                       wake_up(&ctx->stop_wq);
-               }
-               spu_set_timeslice(ctx);
-               mutex_unlock(&ctx->state_mutex);
+       spu = ctx->spu;
+       new = grab_runnable_context(ctx->prio + 1, spu->node);
+       if (new) {
+               spu_unschedule(spu, ctx);
+               spu_add_to_rq(ctx);
        } else {
                ctx->time_slice++;
        }
+out:
+       spu_release(ctx);
+
+       if (new)
+               spu_schedule(spu, new);
 }
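
[Note: the time slice consumed here is refilled by spu_set_timeslice(), now called from __spu_schedule() instead of the run path. A sketch of the priority scaling, assuming the existing SCALE_PRIO()/DEF_SPU_TIMESLICE definitions in sched.c:]

	void spu_set_timeslice(struct spu_context *ctx)
	{
		if (ctx->prio < NORMAL_PRIO)
			ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4,
						     ctx->prio);
		else
			ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE,
						     ctx->prio);
	}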
 
static int spusched_thread(void *unused)
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                for (node = 0; node < MAX_NUMNODES; node++) {
-                       mutex_lock(&cbe_spu_info[node].list_mutex);
-                       list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
-                               if (spu->ctx)
-                                       spusched_tick(spu->ctx);
-                       mutex_unlock(&cbe_spu_info[node].list_mutex);
+                       struct mutex *mtx = &cbe_spu_info[node].list_mutex;
+
+                       mutex_lock(mtx);
+                       list_for_each_entry(spu, &cbe_spu_info[node].spus,
+                                       cbe_list) {
+                               struct spu_context *ctx = spu->ctx;
+
+                               if (ctx) {
+                                       mutex_unlock(mtx);
+                                       spusched_tick(ctx);
+                                       mutex_lock(mtx);
+                               }
+                       }
+                       mutex_unlock(mtx);
                }
        }
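
[Note: list_mutex is dropped around spusched_tick() because the tick takes state_mutex, which now nests outside list_mutex in spu_schedule()/spu_unschedule(). Resuming the iteration afterwards assumes cbe_list membership only changes when spus are added or removed at init/hotplug time. Lock ordering sketch:]

	/*
	 *   spusched_tick(ctx)
	 *     mutex_lock(&ctx->state_mutex)      via spu_acquire()
	 *       spu_unschedule()
	 *         mutex_lock(&cbe_spu_info[node].list_mutex)
	 */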