init_waitqueue_head(&ctx->wbox_wq);
init_waitqueue_head(&ctx->stop_wq);
init_waitqueue_head(&ctx->mfc_wq);
+ init_waitqueue_head(&ctx->run_wq);
ctx->state = SPU_STATE_SAVED;
ctx->ops = &spu_backing_ops;
ctx->owner = get_task_mm(current);
kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
BUG_ON(!list_empty(&ctx->rq));
atomic_dec(&nr_spu_contexts);
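+ /* kfree(NULL) is a no-op, so this is safe when no switch log was allocated */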
+ kfree(ctx->switch_log);
kfree(ctx);
}
void spu_forget(struct spu_context *ctx)
{
struct mm_struct *mm;
- spu_acquire_saved(ctx);
+
+ /*
+ * This is basically an open-coded spu_acquire_saved, except that
+ * we don't acquire the state mutex interruptibly, and we don't
+ * want this context to be rescheduled on release.
+ */
+ mutex_lock(&ctx->state_mutex);
+ if (ctx->state != SPU_STATE_SAVED)
+ spu_deactivate(ctx);
+
mm = ctx->owner;
ctx->owner = NULL;
mmput(mm);
spu_release(ctx);
}
/**
- * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
+ * spu_acquire_saved - lock spu context and make sure it is in saved state
* @ctx: spu context to lock
- *
- * Note:
- * Returns 0 and with the context locked on success
- * Returns negative error and with the context _unlocked_ on failure.
*/
-int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
+int spu_acquire_saved(struct spu_context *ctx)
{
- int ret = -EINVAL;
-
- spu_acquire(ctx);
- if (ctx->state == SPU_STATE_SAVED) {
- /*
- * Context is about to be freed, so we can't acquire it anymore.
- */
- if (!ctx->owner)
- goto out_unlock;
- ret = spu_activate(ctx, flags);
- if (ret)
- goto out_unlock;
- }
-
- return 0;
+ int ret;
- out_unlock:
- spu_release(ctx);
- return ret;
-}
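+ /* interruptible lock; on failure we return with the context unlocked */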
+ ret = spu_acquire(ctx);
+ if (ret)
+ return ret;
-/**
- * spu_acquire_saved - lock spu contex and make sure it is in saved state
- * @ctx: spu contex to lock
- */
-void spu_acquire_saved(struct spu_context *ctx)
-{
- spu_acquire(ctx);
if (ctx->state != SPU_STATE_SAVED) {
set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
spu_deactivate(ctx);
}
+
+ return 0;
}
/**
* spu_release_saved - unlock spu context and return it to the runqueue
* @ctx: context to unlock
*/
void spu_release_saved(struct spu_context *ctx)
{
BUG_ON(ctx->state != SPU_STATE_SAVED);
- if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
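+ /* re-activate only if the context was active and is still inside spu_run */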
+ if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
+ test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
spu_activate(ctx, 0);
spu_release(ctx);
}
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void ( * prof_info_release) (struct kref *kref))
-{
- ctx->prof_priv_kref = prof_info_kref;
- ctx->prof_priv_release = prof_info_release;
-}
-EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
-
-void *spu_get_profile_private_kref(struct spu_context *ctx)
-{
- return ctx->prof_priv_kref;
-}
-EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
-
-
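
With spu_acquire_saved() now returning an error code, callers may no longer
assume the context is locked when the call returns. A minimal caller sketch,
following the acquire/release pattern the spufs file operations use
(example_op() and the work in the middle are hypothetical, shown only to
illustrate the new calling convention):

	static int example_op(struct spu_context *ctx)
	{
		int ret;

		/* may fail (e.g. -EINTR) when interrupted while waiting */
		ret = spu_acquire_saved(ctx);
		if (ret)
			return ret;

		/* ... operate on the saved context state here ... */

		spu_release_saved(ctx);
		return 0;
	}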