#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        /*
         * It should be impossible to preempt a context while an exception
         * is being processed, since the context switch code is specially
         * coded to deal with interrupts ... But, just in case, sanity check
         * the context pointer.  It is OK to return doing nothing since
         * the exception will be regenerated when the context is resumed.
         */
        if (ctx) {
                /* Copy exception arguments into module specific structure */
                ctx->csa.class_0_pending = spu->class_0_pending;
                ctx->csa.dsisr = spu->dsisr;
                ctx->csa.dar = spu->dar;

                /* ensure that the exception status has hit memory before a
                 * thread waiting on the context's stop queue is woken */
                smp_wmb();

                wake_up_all(&ctx->stop_wq);
        }

        /* Clear callback arguments from spu structure */
        spu->class_0_pending = 0;
        spu->dsisr = 0;
        spu->dar = 0;
}
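
/*
 * The smp_wmb() in spufs_stop_callback() orders the csa.class_0_pending,
 * csa.dsisr and csa.dar stores before the wake-up, pairing with the reads
 * of those fields in spu_stopped() below: a thread woken from
 * ctx->stop_wq observes the exception state that caused the wake-up
 * rather than stale values.
 */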

static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
        u64 pte_fault;

        *stat = ctx->ops->status_read(ctx);

        if (ctx->state != SPU_STATE_RUNNABLE ||
            test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
                return 1;

        pte_fault = ctx->csa.dsisr &
            (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
        return (!(*stat & SPU_STATUS_RUNNING) || pte_fault ||
                ctx->csa.class_0_pending) ? 1 : 0;
}
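
/*
 * spu_stopped() is the wake-up condition handed to spufs_wait() in
 * spufs_run_spu() below; it is re-evaluated on every wake-up of
 * ctx->stop_wq, so it has to stay cheap and free of side effects
 * beyond updating *stat.
 */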

static int spu_setup_isolated(struct spu_context *ctx)
{
        int ret;
        u64 __iomem *mfc_cntl;
        u64 sr1;
        u32 status;
        unsigned long timeout;
        const u32 status_loading = SPU_STATUS_RUNNING
                | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

        ret = -ENODEV;
        if (!isolated_loader)
                goto out;

        /*
         * We need to exclude userspace access to the context.
         *
         * To protect against memory access we invalidate all ptes
         * and make sure the pagefault handlers block on the mutex.
         */
        spu_unmap_mappings(ctx);

        mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

        /* purge the MFC DMA queue to ensure no spurious accesses before we
         * enter kernel mode */
        timeout = jiffies + HZ;
        out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
        while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
                        != MFC_CNTL_PURGE_DMA_COMPLETE) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
                                        __FUNCTION__);
                        ret = -EIO;
                        goto out;
                }
                cond_resched();
        }

        /* put the SPE in kernel mode to allow access to the loader */
        sr1 = spu_mfc_sr1_get(ctx->spu);
        sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

        /* start the loader */
        ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
        ctx->ops->signal2_write(ctx,
                        (unsigned long)isolated_loader & 0xffffffff);
        ctx->ops->runcntl_write(ctx,
                        SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

        ret = 0;
        timeout = jiffies + HZ;
        while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
                        status_loading) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout waiting for loader\n",
                                        __FUNCTION__);
                        ret = -EIO;
                        goto out_drop_priv;
                }
                cond_resched();
        }

        if (!(status & SPU_STATUS_RUNNING)) {
                /* If isolated LOAD has failed: run SPU, we will get a stop-and
                 * signal later. */
                pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
                ret = -EACCES;
                goto out_drop_priv;
        }

        if (!(status & SPU_STATUS_ISOLATED_STATE)) {
                /* This isn't allowed by the CBEA, but check anyway */
                pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
                ret = -EINVAL;
                goto out_drop_priv;
        }

out_drop_priv:
        /* Finished accessing the loader. Drop kernel mode */
        sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);
out:
        return ret;
}
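
/*
 * Note how spu_setup_isolated() hands the loader's 64-bit system-memory
 * address to the SPE split across the two 32-bit signal notification
 * registers (high word via signal 1, low word via signal 2); the
 * isolated-mode load sequence presumably reads it back from there to
 * locate and fetch the loader image.
 */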

static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
        unsigned long runcntl;

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        if (ctx->flags & SPU_CREATE_ISOLATE) {
                if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
                        int ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }

                /* if userspace has set the runcntrl register (eg, to issue an
                 * isolated exit), we need to re-set it here */
                runcntl = ctx->ops->runcntl_read(ctx) &
                        (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                if (runcntl == 0)
                        runcntl = SPU_RUNCNTL_RUNNABLE;
        } else {
                unsigned long privcntl;

                if (test_thread_flag(TIF_SINGLESTEP))
                        privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
                else
                        privcntl = SPU_PRIVCNTL_MODE_NORMAL;
                runcntl = SPU_RUNCNTL_RUNNABLE;

                ctx->ops->npc_write(ctx, *npc);
                ctx->ops->privcntl_write(ctx, privcntl);
        }

        ctx->ops->runcntl_write(ctx, runcntl);

        spuctx_switch_state(ctx, SPU_UTIL_USER);

        return 0;
}
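
/*
 * On powerpc, ptrace single-stepping of the controlling thread sets
 * TIF_SINGLESTEP; spu_run_init() mirrors that flag into
 * SPU_PRIVCNTL_MODE_SINGLE_STEP so the SPE stops after each instruction
 * and the run loop below sees SPU_STATUS_SINGLE_STEP.
 */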

static int spu_run_fini(struct spu_context *ctx, u32 *npc, u32 *status)
{
        int ret = 0;

        *status = ctx->ops->status_read(ctx);
        *npc = ctx->ops->npc_read(ctx);

        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        spu_release(ctx);

        if (signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
}

static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
                                  u32 *status)
{
        int ret;

        ret = spu_run_fini(ctx, npc, status);
        if (ret)
                return ret;

        if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
                return *status;

        ret = spu_acquire_runnable(ctx, 0);
        if (ret)
                return ret;

        spuctx_switch_state(ctx, SPU_UTIL_USER);
        return 0;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread.  Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
                                 unsigned int *npc)
{
        int ret;

        switch (*spu_ret) {
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
                /*
                 * Enter the regular syscall restarting for
                 * sys_spu_run, then restart the SPU syscall
                 * callback.
                 */
                *npc -= 8;
                ret = -ERESTARTSYS;
                break;
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * Restart block is too hard for now, just return -EINTR
                 * to the SPU.
                 * ERESTARTNOHAND comes from sys_pause, we also return
                 * -EINTR from there.
                 * Assume that we need to be restarted ourselves though.
                 */
                *spu_ret = -EINTR;
                ret = -ERESTARTSYS;
                break;
        default:
                printk(KERN_WARNING "%s: unexpected return code %ld\n",
                        __FUNCTION__, *spu_ret);
                ret = 0;
        }
        return ret;
}
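
/*
 * A sketch of the intended restart flow: the SPE issues stop-and-signal
 * 0x2104, spu_process_callback() runs the PowerPC syscall, and a
 * pending signal makes it return -ERESTARTSYS.  By then npc has been
 * advanced past the 32-bit indirect-pointer word that follows the stop
 * instruction, so subtracting 8 lands back on the stop-and-signal
 * itself; when sys_spu_run is restarted after the signal handler, the
 * SPE re-issues the whole callback from scratch.
 */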

static int spu_process_callback(struct spu_context *ctx)
{
        struct spu_syscall_block s;
        u32 ls_pointer, npc;
        void __iomem *ls;
        long spu_ret;
        int ret;

        /* get syscall block from local store */
        npc = ctx->ops->npc_read(ctx) & ~3;
        ls = (void __iomem *)ctx->ops->get_ls(ctx);
        ls_pointer = in_be32(ls + npc);
        if (ls_pointer > (LS_SIZE - sizeof(s)))
                return -EFAULT;
        memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

        /* do actual syscall without pinning the spu */
        ret = 0;
        spu_ret = -ENOSYS;
        npc += 4;

        if (s.nr_ret < __NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
                if (spu_ret <= -ERESTARTSYS) {
                        ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
                }
                spu_acquire(ctx);
                if (ret == -ERESTARTSYS)
                        return ret;
        }

        /* write result, jump over indirect pointer */
        memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
        ctx->ops->npc_write(ctx, npc);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        return ret;
}
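
/*
 * Shape of the callback interface (struct spu_syscall_block is defined
 * in asm/spu.h):
 *
 *	struct spu_syscall_block {
 *		u64 nr_ret;	-- syscall number in, return value out
 *		u64 parm[6];	-- up to six 64-bit syscall arguments
 *	};
 *
 * SPE-side convention, as implied by the code above: a stop-and-signal
 * with code 0x2104, immediately followed by a 32-bit word holding the
 * local-store offset of the block.  spu_process_callback() reads that
 * word at the stopped NPC, copies the block out of local store, runs
 * the syscall on the PowerPC side, writes the result back over nr_ret,
 * and advances NPC past the pointer word before resuming the SPE.
 */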

long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
        int ret;
        struct spu *spu;
        u32 status;

        if (mutex_lock_interruptible(&ctx->run_mutex))
                return -ERESTARTSYS;

        spu_enable_spu(ctx);
        ctx->event_return = 0;

        spu_acquire(ctx);
        if (ctx->state == SPU_STATE_SAVED) {
                __spu_update_sched_info(ctx);
                spu_set_timeslice(ctx);

                ret = spu_activate(ctx, 0);
                if (ret) {
                        spu_release(ctx);
                        goto out;
                }
        } else {
                /*
                 * We have to update the scheduling priority under active_mutex
                 * to protect against find_victim().
                 *
                 * No need to update the timeslice ASAP, it will get updated
                 * once the current one has expired.
                 */
                spu_update_sched_info(ctx);
        }

        ret = spu_run_init(ctx, npc);
        if (ret) {
                spu_release(ctx);
                goto out;
        }

        do {
                ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
                if (unlikely(ret))
                        break;
                spu = ctx->spu;
                if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
                                                &ctx->sched_flags))) {
                        if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
                                spu_switch_notify(spu, ctx);
                                continue;
                        }
                }

                spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

                if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
                    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
                        ret = spu_process_callback(ctx);
                        if (ret)
                                break;
                        status &= ~SPU_STATUS_STOPPED_BY_STOP;
                }

                ret = spufs_handle_class1(ctx);
                if (ret)
                        break;

                ret = spufs_handle_class0(ctx);
                if (ret)
                        break;

                if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                        ret = spu_reacquire_runnable(ctx, npc, &status);
                        if (ret)
                                goto out2;
                        continue;
                }

                if (signal_pending(current))
                        ret = -ERESTARTSYS;
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                     SPU_STATUS_STOPPED_BY_HALT |
                                     SPU_STATUS_SINGLE_STEP)));
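
        /*
         * Stop-and-signal codes, as used below: 0x2104 is the PowerPC
         * syscall callback handled in the loop above, any code of the
         * form 0x21xx counts as a library-assisted call towards
         * ctx->stats.libassist, and 0x3fff is treated as a debugger
         * breakpoint and turned into a SIGTRAP.
         */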

        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
            (ctx->state == SPU_STATE_RUNNABLE))
                ctx->stats.libassist++;

        spu_disable_spu(ctx);
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);

        if ((ret == 0) ||
            ((ret == -ERESTARTSYS) &&
             ((status & SPU_STATUS_STOPPED_BY_HALT) ||
              (status & SPU_STATUS_SINGLE_STEP) ||
              ((status & SPU_STATUS_STOPPED_BY_STOP) &&
               (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
                ret = status;

        /* Note: we don't need to force_sig SIGTRAP on single-step
         * since we have TIF_SINGLESTEP set, thus the kernel will do
         * it upon return from the syscall anyway.
         */
        if ((status & SPU_STATUS_STOPPED_BY_STOP)
            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
                force_sig(SIGTRAP, current);
                ret = -ERESTARTSYS;
        }

out2:
        *event = ctx->event_return;
out:
        mutex_unlock(&ctx->run_mutex);
        return ret;
}
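
/*
 * Caller's view (a sketch: spu_run(2) has no glibc wrapper, so user
 * space typically reaches it through syscall(2) or the libspe runtime;
 * ctx_fd here stands for a context descriptor from spu_create(2)):
 *
 *	u32 npc = 0;	-- entry point in the SPE's local store
 *	u32 event = 0;
 *	long status = syscall(__NR_spu_run, ctx_fd, &npc, &event);
 *
 * The return value is the SPU status word that the caller tests against
 * SPU_STATUS_STOPPED_BY_STOP and friends; npc is updated in place, so a
 * subsequent spu_run continues where the context stopped.
 */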