This change disables the logic that faults in spu contexts under the
covers from the page fault handler. When a fault requires a runnable
context, the handler will block until the context is scheduled by
other means.
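
As a simplified sketch (not the verbatim kernel code), the new
sleep/wake pairing looks like this: the fault handler sleeps on the
context's run_wq until the scheduler has bound the context, and the
scheduler wakes that queue after binding:

    /* fault handler side: block until the context is runnable */
    spu_acquire(ctx);
    if (ctx->state == SPU_STATE_SAVED) {
            /* don't sleep with mmap_sem held */
            up_read(&current->mm->mmap_sem);
            spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
            down_read(&current->mm->mmap_sem);
    }
    spu_release(ctx);

    /* scheduler side: wake waiters once the context is bound */
    spu_bind_context(spu, ctx);
    wake_up_all(&ctx->run_wq);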
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
init_waitqueue_head(&ctx->wbox_wq);
init_waitqueue_head(&ctx->stop_wq);
init_waitqueue_head(&ctx->mfc_wq);
+ init_waitqueue_head(&ctx->run_wq);
ctx->state = SPU_STATE_SAVED;
ctx->ops = &spu_backing_ops;
ctx->owner = get_task_mm(current);
{
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long area, offset = address - vma->vm_start;
offset += vma->vm_pgoff << PAGE_SHIFT;
if (offset >= ps_size)
return NOPFN_SIGBUS;
- /* error here usually means a signal.. we might want to test
- * the error code more precisely though
- */
+ /*
+ * We have to wait for context to be loaded before we have
+ * pages to hand out to the user, but we don't want to wait
+ * with the mmap_sem held.
+ * It is possible to drop the mmap_sem here, but then we need
+ * to return NOPFN_REFAULT because the mappings may have
+ * changed.
+ */
- ret = spu_acquire_runnable(ctx, 0);
- if (ret)
- return NOPFN_REFAULT;
+ spu_acquire(ctx);
+ if (ctx->state == SPU_STATE_SAVED) {
+ up_read(&current->mm->mmap_sem);
+ spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+ down_read(&current->mm->mmap_sem);
+ goto out;
+ }
area = ctx->spu->problem_phys + ps_offs;
vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+ out:
spu_release(ctx);
return NOPFN_REFAULT;
- ret = spu_acquire_runnable(ctx, 0);
+ spu_acquire(ctx);
+ ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
spu_bind_context(spu, ctx);
cbe_spu_info[node].nr_active++;
mutex_unlock(&cbe_spu_info[node].list_mutex);
+ wake_up_all(&ctx->run_wq);
wait_queue_head_t wbox_wq;
wait_queue_head_t stop_wq;
wait_queue_head_t mfc_wq;
+ wait_queue_head_t run_wq;
struct fasync_struct *ibox_fasync;
struct fasync_struct *wbox_fasync;
struct fasync_struct *mfc_fasync;