/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120
/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)
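
/*
 * Illustrative arithmetic (not from the original source; HZ-dependent):
 * the spusched timer below is re-armed at jiffies + SPUSCHED_TICK, so
 * with HZ=250 it fires every 10 jiffies = 40ms, with HZ=1000 every 10ms.
 */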
/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
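
/*
 * Worked example of the scaling above (illustrative only; the exact
 * numbers depend on the kernel's HZ setting, here assumed to be 1000,
 * giving DEF_SPU_TIMESLICE = 100 * 1000 / (1000 * 10) = 10 spu ticks):
 *
 *   nice   0 (prio 120): SCALE_PRIO(10, 120)     = 10 * 20 / 20 = 10 ticks
 *   nice  19 (prio 139): SCALE_PRIO(10, 139)     = max(0, 1)    =  1 tick
 *   nice -20 (prio 100): SCALE_PRIO(10 * 4, 100) = 40 * 40 / 20 = 80 ticks
 *
 * which yields the [800ms ... 100ms ... minimum] spread documented
 * above, clamped at the bottom by MIN_SPU_TIMESLICE.
 */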
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * per definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;
}
void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}
static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}
void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();	/* make the assignments above visible before the context runs */
	spu_unmap_mappings(ctx);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}
/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}
static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
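
/*
 * Illustration (added for clarity, not from the original source): for a
 * gang whose aff_list holds the contexts [A, B, ref, C, D], the two loops
 * above assign aff_offset values [-2, -1, 0, 1, 2], i.e. each context's
 * signed distance from the reference context.  ctx_location() later walks
 * that many schedulable SPUs away from the gang's reference SPU.
 */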
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}
static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu_context *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next,
					    aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}
/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns whether the gang has an affinity reference spu to schedule
 * next to, computing and caching that reference location on first use.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}
/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}
/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}
static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}
static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}
void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}
/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_vitim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);

			return spu;
		}
	}

	return NULL;
}
static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}
static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}
static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}
/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
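
/*
 * Note on the lookup above: the runqueue mirrors the classic O(1) CPU
 * scheduler layout, one list per priority level plus a bitmap of
 * non-empty levels, so find_first_bit() locates the highest priority
 * runnable context without scanning all MAX_PRIO lists.
 */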
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}
/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}
/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}
/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
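
/*
 * CALC_LOAD (from <linux/sched.h>) is the same exponentially decaying
 * average used for the CPU loadavg:
 *
 *   load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * Worked step (illustrative): for the 1-minute average (EXP_1 = 1884,
 * FIXED_1 = 2048), starting from load 0 with one active context gives
 * (0 + 2048 * 164) >> 11 = 164 in fixed point, i.e. 0.08; the average
 * then creeps towards 1.0 over successive LOAD_FREQ (5 sec) intervals.
 */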
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}
static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					/* drop the list mutex: spusched_tick()
					 * may re-take it via spu_unschedule() */
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}
void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
	}
}
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
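
/*
 * Example (illustrative): with FSHIFT = 11 an avenrun value of 3072
 * splits into LOAD_INT(3072) = 1 and LOAD_FRAC(3072) =
 * (1024 * 100) >> 11 = 50, printed below as "1.50".  The FIXED_1/200
 * added to each value in show_spu_loadavg() rounds to the nearest
 * hundredth.
 */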
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}
static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}
static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}
void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}