/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);
/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);
/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);
/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
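
/*
 * Example of the resulting discipline (the same pattern create_spu
 * uses below): writers take both locks, readers take either one.
 *
 *	mutex_lock(&spu_full_list_mutex);
 *	spin_lock_irqsave(&spu_full_list_lock, flags);
 *	list_add(&spu->full_list, &spu_full_list);
 *	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 *	mutex_unlock(&spu_full_list_mutex);
 */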
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* the SLB is only used while address relocation is enabled */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm.
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}
/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);

	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);
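
/*
 * The __spu_trap_* helpers below run from interrupt bottom-half context
 * and simply forward the raw event to whoever owns the SPU via the
 * registered dma_callback.
 */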
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
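
/*
 * Write a single SLB entry into the SPU MMU: select the slot through the
 * index register, then load the VSID and ESID words via the priv2 MMIO
 * area. Writing the ESID (which carries the valid bit) last is what
 * activates the entry.
 */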
static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	out_be64(&priv2->slb_esid_RW, slb->esid);
}
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	/* advance the round-robin replacement pointer */
	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}
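
/*
 * In the hash_page() call below, 0x300 is the PowerPC data storage
 * interrupt vector, which is what hash_page() expects for a data access;
 * _PAGE_PRESENT asks only that a translation exists, with no further
 * permission requirements.
 */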
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	/* publish the fault state before waking the owner */
	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
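
/*
 * Build a kernel-space SLB entry for @addr: the linear-mapping page size
 * applies inside the kernel region, the vmalloc page size everywhere else.
 */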
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}
/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}
/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct spu_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
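
/*
 * Typical call (sketch; mirrors the spufs context-switch path, where
 * spu_save_code stands for whichever save/restore text the caller
 * needs mapped):
 *
 *	spu_setup_kernel_slbs(spu, csa->lscsa, &spu_save_code[0],
 *			      sizeof(spu_save_code));
 */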
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);
	stat &= mask;

	spin_lock(&spu->register_lock);
	spu->class_0_pending |= stat;
	spin_unlock(&spu->register_lock);

	spu->stop_callback(spu);

	spu_int_stat_clear(spu, 0, stat);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long flags;
	unsigned long stat;

	spin_lock_irqsave(&spu->register_lock, flags);
	stat = spu->class_0_pending;
	spu->class_0_pending = 0;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
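
/*
 * Class 1 interrupts report MFC translation faults: SLB segment misses
 * and page table misses on DMA, plus the LS compare & suspend events.
 */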
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
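
/*
 * Class 2 interrupts carry the "application" events: the two mailbox
 * directions, SPU stop-and-signal/halt, and MFC tag group completion.
 */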
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
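
/*
 * Drain stale data from the channels that carry it, then set the channel
 * counts to their architected reset values (e.g. 16 for channel 0x15,
 * presumably reflecting the 16-deep MFC command queue).
 */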
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);
static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->sysdev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
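
/*
 * Bring up one SPU: allocate and number it, have the platform management
 * ops create it, set up MFC registers and interrupts, register the sysdev,
 * then publish it on the per-node and global lists.
 */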
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};
static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
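
/*
 * The stat attribute prints one line per SPU: the current utilization
 * state followed by the four accounted times (in milliseconds, via
 * spu_acct_time) and the context-switch, fault, interrupt and
 * library-assist counters, in the order of the sprintf below.
 */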
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}
/* read-only: there is no store method */
static SYSDEV_ATTR(stat, 0444, spu_stat_show, NULL);
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because of conflicting types for
		 * const and __initdata with different compiler versions.
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");