/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"
#include "plpar_wrappers.h"
static struct irq_host *xics_host;

#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0
/* Want a priority other than 0.  Various HW issues require this. */
#define	DEFAULT_PRIORITY	5
/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED.
 */
#define IPI_PRIORITY		4
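/*
 * For reference, XICS priority semantics: 0 is the most favored
 * priority and 0xff the least favored. A cpu accepts an interrupt
 * only if it is more favored than the current CPPR, so IPI_PRIORITY
 * (4) wins over DEFAULT_PRIORITY (5) device interrupts; a CPPR of
 * 0xff accepts everything, a CPPR of 0 rejects everything.
 */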
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
/* Direct hardware low level accessors */

/* The part of the interrupt presentation layer that we care about */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};

static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
static inline unsigned int direct_xirr_info_get(void)
{
	int cpu = smp_processor_id();

	return in_be32(&xics_per_cpu[cpu]->xirr.word);
}
static inline void direct_xirr_info_set(unsigned int value)
{
	int cpu = smp_processor_id();

	out_be32(&xics_per_cpu[cpu]->xirr.word, value);
}
static inline void direct_cppr_info(u8 value)
{
	int cpu = smp_processor_id();

	out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
}
static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
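/*
 * Note: the qirr byte written above is the per-cpu MFRR (Most
 * Favored Request Register). Writing a priority value raises an IPI
 * at that priority on the target cpu; writing 0xff clears the
 * request.
 */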
/* LPAR low level accessors */
static inline unsigned int lpar_xirr_info_get(void)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (unsigned int)return_value;
}
static inline void lpar_xirr_info_set(unsigned int value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_eoi(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc,
		      value);
}
static inline void lpar_cppr_info(u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}
static inline void lpar_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
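/*
 * Under PAPR the hypervisor owns the presentation layer registers,
 * so the LPAR variants above go through the plpar_* wrappers for the
 * equivalent hcalls (H_XIRR, H_EOI, H_CPPR, H_IPI) instead of doing
 * MMIO accesses.
 */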
/* Interface to generic irq subsystem */
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
	int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_desc[virq].affinity;
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
		cpus_and(tmp, cpu_online_map, cpumask);

		server = first_cpu(tmp);

		if (server < NR_CPUS)
			return get_hard_smp_processor_id(server);

		if (strict_check)
			return -1;
	}

	if (cpus_equal(cpu_online_map, cpu_present_map))
		return default_distrib_server;

	return default_server;
}
#else
static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
	return default_server;
}
#endif
static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	int server;

	pr_debug("xics: unmask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	pr_debug(" -> map to hwirq 0x%x\n", irq);
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	server = get_irq_server(virq, 0);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk(KERN_ERR "set_xive token %x, server %x\n",
		       ibm_set_xive, server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}
static unsigned int xics_startup(unsigned int virq)
{
	/* unmask it */
	xics_unmask_irq(virq);
	return 0;
}
static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
				default_server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
		       "ibm_set_xive(0xff) returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	pr_debug("xics: mask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;
	xics_mask_real_irq(irq);
}
static void xics_mask_unknown_vec(unsigned int vec)
{
	printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
	xics_mask_real_irq(vec);
}
static inline unsigned int xics_xirr_vector(unsigned int xirr)
{
	/*
	 * The top byte is the old cppr, to be restored on EOI.
	 * The remaining 24 bits are the vector.
	 */
	return xirr & 0x00ffffff;
}
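/*
 * XIRR layout, for reference:
 *
 *   bits  0-7  (top byte)     CPPR in effect when the irq was taken
 *   bits  8-31 (low 24 bits)  source vector (XISR)
 *
 * e.g. xirr = 0x05000010 means the cpu was at priority 5 when
 * vector 0x10 was presented.
 */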
static unsigned int xics_get_irq_direct(void)
{
	unsigned int xirr = direct_xirr_info_get();
	unsigned int vec = xics_xirr_vector(xirr);
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = irq_radix_revmap_lookup(xics_host, vec);
	if (likely(irq != NO_IRQ))
		return irq;

	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	direct_xirr_info_set(xirr);
	return NO_IRQ;
}
static unsigned int xics_get_irq_lpar(void)
{
	unsigned int xirr = lpar_xirr_info_get();
	unsigned int vec = xics_xirr_vector(xirr);
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = irq_radix_revmap_lookup(xics_host, vec);
	if (likely(irq != NO_IRQ))
		return irq;

	/* We don't have a linux mapping, so have RTAS mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	lpar_xirr_info_set(xirr);
	return NO_IRQ;
}
static void xics_eoi_direct(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	direct_xirr_info_set((0xff << 24) | irq);
}
static void xics_eoi_lpar(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	lpar_xirr_info_set((0xff << 24) | irq);
}
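/*
 * Both EOI paths write (0xff << 24) | irq to the XIRR: the low 24
 * bits identify the vector being EOId, while the 0xff in the CPPR
 * byte drops the cpu back to the least favored (fully open)
 * priority.
 */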
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	int irq_server;

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get the current irq_server for the given irq.
	 */
	irq_server = get_irq_server(virq, 1);
	if (irq_server == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		printk(KERN_WARNING "xics_set_affinity: No online cpus in "
		       "the mask %s for irq %d\n", cpulist, virq);
		return;
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, irq_server, xics_status[1]);
	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
static struct irq_chip xics_pic_direct = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};
/* Interface to arch irq controller subsystem layer */

/* Points to the irq_chip we're actually using */
static struct irq_chip *xics_irq_chip;
static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for
	 * things like vdevices, events, etc... The trick we use here is to
	 * match everything except the legacy 8259, which is compatible
	 * "chrp,iic".
	 */
	return !of_device_is_compatible(node, "chrp,iic");
}
static int xics_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	pr_debug("xics: map virq %d, hwirq 0x%lx\n", virq, hw);

	/* Insert the interrupt mapping into the radix tree for fast lookup */
	irq_radix_revmap_insert(xics_host, virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
	return 0;
}
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* The current xics implementation translates everything to level.
	 * That is not technically right for MSIs, but it is irrelevant at
	 * this point. We might get smarter in the future.
	 */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
static struct irq_host_ops xics_host_ops = {
	.match = xics_host_match,
	.map = xics_host_map,
	.xlate = xics_host_xlate,
};
static void __init xics_init_host(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		xics_irq_chip = &xics_pic_lpar;
	else
		xics_irq_chip = &xics_pic_direct;

	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
/* Inter-processor interrupt support */
#ifdef CONFIG_SMP

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct {
	unsigned long value;
} ____cacheline_aligned;

static struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
static inline void smp_xics_do_message(int cpu, int msg)
{
	set_bit(msg, &xics_ipi_message[cpu].value);
	mb();
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}
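/*
 * Example: smp_xics_do_message(1, PPC_MSG_RESCHEDULE) sets the
 * reschedule bit in cpu 1's message word and then pokes its
 * qirr/MFRR at IPI_PRIORITY so the single XICS IPI vector fires
 * there; xics_ipi_dispatch() below decodes the bits back into
 * messages.
 */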
void smp_xics_message_pass(int target, int msg)
{
	unsigned int i;

	if (target < NR_CPUS) {
		smp_xics_do_message(target, msg);
	} else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_xics_do_message(i, msg);
		}
	}
}
static irqreturn_t xics_ipi_dispatch(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE);
		}
		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
		}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
		}
#endif
	}
	return IRQ_HANDLED;
}
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}
static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}
static void xics_request_ipi(void)
{
	unsigned int ipi;
	int rc;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
				 "IPI", NULL);
	else
		rc = request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
				 "IPI", NULL);
	BUG_ON(rc);
}
int __init smp_xics_probe(void)
{
	xics_request_ipi();

	return cpus_weight(cpu_possible_map);
}

#endif /* CONFIG_SMP */
/* Initialization */

static void xics_update_irq_servers(void)
{
	int i, j;
	struct device_node *np;
	int ilen;
	const u32 *ireg, *isize;
	u32 hcpuid;

	/* Find the server numbers for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg) {
		of_node_put(np);
		return;
	}

	i = ilen / sizeof(int);
	hcpuid = get_hard_smp_processor_id(boot_cpuid);

	/* The global interrupt distribution server is specified in the last
	 * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last
	 * entry from this property for the current boot cpu id and use it
	 * as the default distribution server.
	 */
	for (j = 0; j < i; j += 2) {
		if (ireg[j] == hcpuid) {
			default_server = hcpuid;
			default_distrib_server = ireg[j+1];

			isize = of_get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (isize)
				interrupt_server_size = *isize;
		}
	}

	of_node_put(np);
}
static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
				    unsigned long size)
{
#ifdef CONFIG_SMP
	int i;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			xics_per_cpu[i] = ioremap(addr, size);
			return;
		}
	}
#else
	if (hw_id != 0)
		return;
	xics_per_cpu[0] = ioremap(addr, size);
#endif /* CONFIG_SMP */
}
static void __init xics_init_one_node(struct device_node *np,
				      unsigned int *indx)
{
	int ilen;
	const u32 *ireg;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far, but we are playing with
	 * fire... should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);

	/* Does that ever happen? We'll know soon enough... but even good
	 * old f80 does have that property.
	 */
	WARN_ON(ireg == NULL);
	if (ireg) {
		/* Set the starting index for this node */
		*indx = *ireg;
	}

	ireg = of_get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen >= (4 * sizeof(u32))) {
		unsigned long addr, size;

		/* XXX Use proper OF parsing code here !!! */
		addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		addr |= *ireg++;
		ilen -= sizeof(u32);
		size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		size |= *ireg++;
		ilen -= sizeof(u32);
		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}
}
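/*
 * The "reg" parsing above assumes the layout these machines use:
 * each entry is four 32-bit cells, a 64-bit address followed by a
 * 64-bit size, one entry per interrupt presentation area (per cpu).
 */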
void __init xics_init_IRQ(void)
{
	struct device_node *np;
	u32 indx = 0;
	int found = 0;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
		found = 1;
		if (firmware_has_feature(FW_FEATURE_LPAR))
			break;
		xics_init_one_node(np, &indx);
	}
	if (found == 0)
		return;

	xics_update_irq_servers();
	xics_init_host();

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.get_irq = xics_get_irq_lpar;
	else
		ppc_md.get_irq = xics_get_irq_direct;

	xics_setup_cpu();

	ppc64_boot_msg(0x21, "XICS Done");
}
/* Cpu startup, shutdown, and hotplug */
static void xics_set_cpu_priority(unsigned char cppr)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cppr);
	else
		direct_cppr_info(cppr);
	iosync();
}
void xics_setup_cpu(void)
{
	xics_set_cpu_priority(0xff);

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
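/*
 * The GIQ indicator index, (1UL << interrupt_server_size) - 1 -
 * default_distrib_server, maps the global distribution server number
 * into the indicator index RTAS expects; the final argument selects
 * join (1) or leave (0).
 */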
void xics_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(0);

	/* Clear any pending IPI request */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, 0xff);
	else
		direct_qirr_info(cpu, 0xff);
}
void xics_kexec_teardown_cpu(int secondary)
{
	unsigned int ipi;
	struct irq_desc *desc;

	xics_teardown_cpu();

	/*
	 * We need to EOI the IPI.
	 *
	 * Probably need to check all the other interrupts too;
	 * should we be flagging the idle loop instead,
	 * or creating some task to be scheduled?
	 */
	ipi = irq_find_mapping(xics_host, XICS_IPI);
	if (ipi == XICS_IRQ_SPURIOUS)
		return;
	desc = get_irq_desc(ipi);
	if (desc->chip && desc->chip->eoi)
		desc->chip->eoi(ipi);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;

	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == default_server)
		xics_update_irq_servers();

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(0);

	/* Remove ourselves from the global interrupt queue */
	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(DEFAULT_PRIORITY);
	for_each_irq(virq) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = get_irq_desc(virq);

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
			       "ibm,get-xive returns %d\n",
			       virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != hw_cpu)
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		irq_desc[virq].affinity = CPU_MASK_ALL;
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */