* if it is timesliced or preempted.
*/
ctx->cpus_allowed = current->cpus_allowed;
+
+ /* Save the current CPU id for SPU interrupt routing. */
+ ctx->last_ran = raw_smp_processor_id();
}
void spu_update_sched_info(struct spu_context *ctx)
spu->mfc_callback = spufs_mfc_callback;
mb();
spu_unmap_mappings(ctx);
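+ /* Record a context-switch start event in the spufs switch log. */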
+ spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
- spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, ctx);
ctx->state = SPU_STATE_RUNNABLE;
- spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
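+ /* The freshly restored context is accounted as user time, not as loaded-but-idle. */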
+ spuctx_switch_state(ctx, SPU_UTIL_USER);
}
/*
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
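+ /* Record the matching stop event once the context state has been saved. */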
+ spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
spu->timestamp = jiffies;
ctx->state = SPU_STATE_SAVED;
spu->ibox_callback = NULL;
struct spu *spu;
int node, n;
- spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+ spu_context_nospu_trace(spu_find_victim__enter, ctx);
/*
* Look for a possible preemption candidate on the local node first.
victim->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
- spu_add_to_rq(victim);
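+ /* Only requeue the victim while it is still inside spu_run. */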
+ if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
+ spu_add_to_rq(victim);
mutex_unlock(&victim->state_mutex);
{
struct spu_context *new = NULL;
struct spu *spu = NULL;
- u32 status;
if (spu_acquire(ctx))
BUG(); /* a kernel thread never has signals pending */
if (ctx->state != SPU_STATE_RUNNABLE)
goto out;
- if (spu_stopped(ctx, &status))
- goto out;
if (ctx->flags & SPU_CREATE_NOSCHED)
goto out;
if (ctx->policy == SCHED_FIFO)
goto out;
- if (--ctx->time_slice)
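+ /* A context that has left spu_run is preempted even if its time slice has not expired. */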
+ if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
goto out;
spu = ctx->spu;
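+ /* Look for a waiting context of equal or higher priority to take over the SPU. */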
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
spu_unschedule(spu, ctx);
- spu_add_to_rq(ctx);
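+ /* As above, requeue only if the context is still inside spu_run. */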
+ if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+ spu_add_to_rq(ctx);
} else {
spu_context_nospu_trace(spusched_tick__newslice, ctx);
ctx->time_slice++;