/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular cpu:
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

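/*
 * Illustrative sketch (editorial addition, not part of the original
 * header; all names below are defined in this file): when the
 * hypervisor raises an event on port N, xen_evtchn_do_upcall() maps it
 * through evtchn_to_irq[N] and hands the result to the generic IRQ
 * layer, so handlers installed with request_irq() run unchanged:
 *
 *	event on port N -> evtchn_to_irq[N] -> handle_irq(irq, regs)
 */
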
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {			/* type-specific info */
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};

static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

/* Pending, routed to this cpu, and not globally masked. */
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

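/*
 * Worked example (values invented for illustration): if bit 38 of
 * sh->evtchn_pending[0] is set, bit 38 of cpu_evtchn_mask(cpu)[0] is
 * set (the port is routed to this cpu), and bit 38 of
 * sh->evtchn_mask[0] is clear, then active_evtchns(cpu, sh, 0) has
 * bit 38 set and port 38 will be handled on this cpu.
 */
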
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Route every port to CPU#0's mask; sizeof the struct, not the
	   pointer returned by cpu_evtchn_mask(). */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_cpu(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

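/*
 * Usage sketch (my_handler and my_dev are hypothetical, not defined in
 * this file): a front-end that has learned the port number of its
 * inter-domain channel binds it like any other interrupt and tears it
 * down again with unbind_from_irqhandler():
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					    "my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */
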
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

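/*
 * Usage sketch (the call site is assumed, not part of this file;
 * VIRQ_DEBUG and IRQF_DISABLED come from the Xen interface and IRQ
 * headers): xen_debug_interrupt() below is typically wired up per cpu
 * as a VIRQ:
 *
 *	bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
 *				IRQF_DISABLED, "debug", NULL);
 */
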
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];

	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static void xen_do_irq(unsigned irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (WARN_ON(irq == -1))
		goto out;

	exit_idle();
	irq_enter();

	handle_irq(irq, regs);

	irq_exit();

out:
	set_irq_regs(old_regs);
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into xen_do_irq() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */

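/*
 * Worked example of the two-level lookup (numbers invented for
 * illustration): on a 64-bit guest, if the xchg() of
 * evtchn_pending_sel returns a word with bit 2 set, and
 * active_evtchns(cpu, s, 2) has bit 5 set, then
 * port = 2 * BITS_PER_LONG + 5 = 133 is pending, and
 * evtchn_to_irq[133] selects the irq to handle.
 */
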
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				xen_do_irq(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so the bindcount should be non-0 */
	BUG_ON(irq_bindcount[irq] == 0);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	/* Mask first, set pending, then unmask only if the channel
	   wasn't already masked. */
	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,

	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;
	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);

	cpu_evtchn_mask_p = alloc_bootmem(size);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < nr_irqs; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}