/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}
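
/* Illustrative note (not part of the original file): callers hand these
 * accessors a *physical* address, normally obtained via __pa(), e.g.
 *
 *	bucket = &ivector_table[ino];
 *	virt_irq = bucket_get_virt_irq(__pa(bucket));
 *
 * which is what keeps every bucket access a bypass (ASI_PHYS_USE_EC)
 * access.
 */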

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}
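
/* Usage sketch (illustrative): bus code grabs a slot for a
 * (dev_handle, dev_ino) pair and treats 0 as failure; entry 0 is
 * never handed out, which is why it can double as the error value:
 *
 *	unsigned int virt_irq = virt_irq_alloc(devhandle, devino);
 *	if (!virt_irq)
 *		return -ENOMEM;
 */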

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
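
/* Worked example of the Safari encoding above (illustrative): cpuid 37
 * splits into agent id a = 37 & 0x1f = 5 and node id n = (37 >> 5) & 0x1f = 1,
 * which are then packed into the IMAP AID and NID fields respectively.
 */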

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

static void sun4u_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		unsigned long tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static void sun4v_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virt_set_affinity(unsigned int virt_irq,
				    const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}
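
/* Usage sketch (hypothetical names): a bus driver that must poke its
 * controller before the normal flow handler runs would do
 *
 *	irq_install_pre_handler(virt_irq, my_pre_handler, ctrl_regs, NULL);
 *
 * after which each interrupt first invokes
 * my_pre_handler(ino, ctrl_regs, NULL) and then handle_fasteoi_irq().
 */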

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}
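
/* For fixed sun4v interrupts the hypervisor folds (devhandle, devino)
 * into a single sysino which indexes ivector_table directly, e.g.
 *
 *	virt_irq = sun4v_build_irq(devhandle, devino);
 *
 * Cookie-based virtual interrupts (below) skip that folding and key
 * virt_irq_table on the (devhandle, devino) pair itself.
 */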

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  The trap code
	 * chains buckets onto the per-cpu worklist via __irq_chain_pa;
	 * here we swap the list head with zero under disabled interrupts
	 * and then walk the snapshot.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

static void unhandled_perf_irq(struct pt_regs *regs)
{
	unsigned long pcr, pic;

	read_pcr(pcr);
	read_pic(pic);

	write_pcr(0);

	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
	       smp_processor_id());
	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
	       smp_processor_id(), pcr, pic);
}

/* Almost a direct copy of the powerpc PMC code.  */
static DEFINE_SPINLOCK(perf_irq_lock);
static void *perf_irq_owner_caller; /* mostly for debugging */
static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;

/* Invoked from level 15 PIL handler in trap table.  */
void perfctr_irq(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	perf_irq(regs);
}

int register_perfctr_intr(void (*handler)(struct pt_regs *))
{
	int ret;

	if (!handler)
		return -EINVAL;

	spin_lock(&perf_irq_lock);
	if (perf_irq != unhandled_perf_irq) {
		printk(KERN_WARNING "register_perfctr_intr: "
		       "perf IRQ busy (reserved by caller %p)\n",
		       perf_irq_owner_caller);
		ret = -EBUSY;
		goto out;
	}

	perf_irq_owner_caller = __builtin_return_address(0);
	perf_irq = handler;

	ret = 0;
out:
	spin_unlock(&perf_irq_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_perfctr_intr);

void release_perfctr_intr(void (*handler)(struct pt_regs *))
{
	spin_lock(&perf_irq_lock);
	perf_irq_owner_caller = NULL;
	perf_irq = unhandled_perf_irq;
	spin_unlock(&perf_irq_lock);
}
EXPORT_SYMBOL_GPL(release_perfctr_intr);
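
/* Usage sketch (hypothetical handler name): a profiling client owns
 * the level-15 perf interrupt only while it is active:
 *
 *	err = register_perfctr_intr(my_pmc_handler);
 *	if (err)
 *		return err;
 *	...
 *	release_perfctr_intr(my_pmc_handler);
 *
 * Only one owner is allowed at a time; a second registration fails
 * with -EBUSY, and the saved caller address identifies the owner.
 */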

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must already be mapped.  */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
1058 void __init init_IRQ(void)
1065 size = sizeof(struct ino_bucket) * NUM_IVECS;
1066 ivector_table = alloc_bootmem(size);
1067 if (!ivector_table) {
1068 prom_printf("Fatal error, cannot allocate ivector_table\n");
1071 __flush_dcache_range((unsigned long) ivector_table,
1072 ((unsigned long) ivector_table) + size);
1074 ivector_table_pa = __pa(ivector_table);
1076 if (tlb_type == hypervisor)
1077 sun4v_init_mondo_queues();
1079 init_send_mondo_info();
1081 if (tlb_type == hypervisor) {
1082 /* Load up the boot cpu's entries. */
1083 sun4v_register_mondo_queues(hard_smp_processor_id());
1086 /* We need to clear any IRQ's pending in the soft interrupt
1087 * registers, a spurious one could be left around from the
1088 * PROM timer which we just disabled.
1090 clear_softint(get_softint());
1092 /* Now that ivector table is initialized, it is safe
1093 * to receive IRQ vector traps. We will normally take
1094 * one or two right now, in case some device PROM used
1095 * to boot us wants to speak to us. We just ignore them.
1097 __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
1098 "or %%g1, %0, %%g1\n\t"
1099 "wrpr %%g1, 0x0, %%pstate"
1104 irq_desc[0].action = &timer_irq_action;