*/
 #define irq_work(__cpu)        &(trap_block[(__cpu)].irq_worklist)
 
-static struct irqaction *irq_action[NR_IRQS];
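+/* irq_action[0] is statically reserved for the timer. */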
+static struct irqaction timer_irq_action = {
+       .name = "timer",
+};
+static struct irqaction *irq_action[NR_IRQS] = { &timer_irq_action, };
 
 /* This only synchronizes entities which modify IRQ handler
  * state and some selected user-level spots that want to
  * read things in the table.  IRQ handler processing orders
  * its call to these routines using its own locking.
  */
 static DEFINE_SPINLOCK(irq_action_lock);
 
+static unsigned int virt_to_real_irq_table[NR_IRQS];
+static unsigned char virt_irq_cur = 1;
+
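+/* Allocate virtual IRQ numbers with a simple bump allocator.
+ * Zero is reserved to mean "no mapping", and numbers are never
+ * reused once handed out.
+ */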
+static unsigned char virt_irq_alloc(unsigned int real_irq)
+{
+       unsigned char ent;
+
+       BUILD_BUG_ON(NR_IRQS >= 256);
+
+       ent = virt_irq_cur;
+       if (ent >= NR_IRQS) {
+               printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+               return 0;
+       }
+
+       virt_irq_cur = ent + 1;
+       virt_to_real_irq_table[ent] = real_irq;
+
+       return ent;
+}
+
+#if 0 /* Currently unused. */
+static unsigned char real_to_virt_irq(unsigned int real_irq)
+{
+       struct ino_bucket *bucket = __bucket(real_irq);
+
+       return bucket->virt_irq;
+}
+#endif
+
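+/* A zero return means the virtual IRQ was never allocated. */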
+static unsigned int virt_to_real_irq(unsigned char virt_irq)
+{
+       return virt_to_real_irq_table[virt_irq];
+}
+
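+/* Install a pre-handler that runs before the regular handlers of
+ * the given virtual IRQ.  Unmapped virtual IRQs are ignored.
+ */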
+void irq_install_pre_handler(int virt_irq,
+                            void (*func)(struct ino_bucket *, void *, void *),
+                            void *arg1, void *arg2)
+{
+       unsigned int real_irq = virt_to_real_irq(virt_irq);
+       struct ino_bucket *bucket;
+       struct irq_desc *d;
+
+       if (unlikely(!real_irq))
+               return;
+
+       bucket = __bucket(real_irq);
+       d = bucket->irq_info;
+       d->pre_handler = func;
+       d->pre_handler_arg1 = arg1;
+       d->pre_handler_arg2 = arg2;
+}
+
 static void register_irq_proc (unsigned int irq);
 
 /*
        return tid;
 }
 
-/* Now these are always passed a true fully specified sun4u INO. */
-void enable_irq(unsigned int irq)
+void enable_irq(unsigned int virt_irq)
 {
-       struct ino_bucket *bucket = __bucket(irq);
+       unsigned int real_irq = virt_to_real_irq(virt_irq);
+       struct ino_bucket *bucket;
        unsigned long imap, cpuid;
 
+       if (unlikely(!real_irq))
+               return;
+
+       bucket = __bucket(real_irq);
        imap = bucket->imap;
-       if (imap == 0UL)
+       if (unlikely(imap == 0UL))
                return;
 
        preempt_disable();
        cpuid = real_hard_smp_processor_id();
 
        if (tlb_type == hypervisor) {
-               unsigned int ino = __irq_ino(irq);
+               unsigned int ino = __irq_ino(real_irq);
                int err;
 
                err = sun4v_intr_settarget(ino, cpuid);
        preempt_enable();
 }
 
-/* This now gets passed true ino's as well. */
-void disable_irq(unsigned int irq)
+void disable_irq(unsigned int virt_irq)
 {
-       struct ino_bucket *bucket = __bucket(irq);
+       unsigned int real_irq = virt_to_real_irq(virt_irq);
+       struct ino_bucket *bucket;
        unsigned long imap;
 
+       if (unlikely(!real_irq))
+               return;
+
+       bucket = __bucket(real_irq);
        imap = bucket->imap;
-       if (imap != 0UL) {
-               if (tlb_type == hypervisor) {
-                       unsigned int ino = __irq_ino(irq);
-                       int err;
-
-                       err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
-                       if (err != HV_EOK)
-                               printk("sun4v_intr_setenabled(%x): "
-                                      "err(%d)\n", ino, err);
-               } else {
-                       u32 tmp;
-
-                       /* NOTE: We do not want to futz with the IRQ clear registers
-                        *       and move the state to IDLE, the SCSI code does call
-                        *       disable_irq() to assure atomicity in the queue cmd
-                        *       SCSI adapter driver code.  Thus we'd lose interrupts.
-                        */
-                       tmp = upa_readl(imap);
-                       tmp &= ~IMAP_VALID;
-                       upa_writel(tmp, imap);
-               }
+       if (unlikely(imap == 0UL))
+               return;
+
+       if (tlb_type == hypervisor) {
+               unsigned int ino = __irq_ino(real_irq);
+               int err;
+
+               err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+               if (err != HV_EOK)
+                       printk("sun4v_intr_setenabled(%x): "
+                              "err(%d)\n", ino, err);
+       } else {
+               u32 tmp;
+
+               /* NOTE: We do not want to futz with the IRQ clear registers
+                *       and move the state to IDLE, since the SCSI code calls
+                *       disable_irq() to ensure atomicity in the queue-command
+                *       path of the SCSI adapter driver.  We would lose
+                *       interrupts if we did.
+                */
+               tmp = upa_readl(imap);
+               tmp &= ~IMAP_VALID;
+               upa_writel(tmp, imap);
        }
 }
 
        prom_halt();
 }
 
-unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags)
 {
        struct ino_bucket *bucket;
        int ino;
 
        BUG_ON(tlb_type == hypervisor);
 
-       /* RULE: Both must be specified in all other cases. */
+       /* RULE: Both must be specified. */
        if (iclr == 0UL || imap == 0UL) {
                prom_printf("Invalid build_irq %d %016lx %016lx\n",
                            inofixup, iclr, imap);
         */
        bucket->imap  = imap;
        bucket->iclr  = iclr;
-       bucket->flags = 0;
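+       /* Allocate a virtual IRQ number the first time this bucket is built. */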
+       if (!bucket->virt_irq)
+               bucket->virt_irq = virt_irq_alloc(__irq(bucket));
+       bucket->flags = flags;
 
 out:
-       return __irq(bucket);
+       return bucket->virt_irq;
 }
 
 unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
         */
        bucket->imap = ~0UL - sysino;
        bucket->iclr = ~0UL - sysino;
-
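+       /* As in build_irq(), allocate the virtual IRQ number lazily. */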
+       if (!bucket->virt_irq)
+               bucket->virt_irq = virt_irq_alloc(__irq(bucket));
        bucket->flags = flags;
 
        bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
                prom_halt();
        }
 
-       return __irq(bucket);
+       return bucket->virt_irq;
 }
 
 static void atomic_bucket_insert(struct ino_bucket *bucket)
        return NULL;
 }
 
-int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+int request_irq(unsigned int virt_irq,
+               irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags, const char *name, void *dev_id)
 {
        struct irqaction *action;
-       struct ino_bucket *bucket = __bucket(irq);
+       struct ino_bucket *bucket;
        unsigned long flags;
+       unsigned int real_irq;
        int pending = 0;
 
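+       /* Translate to the real IRQ first; an unmapped virtual IRQ
+        * is a caller error.
+        */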
+       real_irq = virt_to_real_irq(virt_irq);
+       if (unlikely(!real_irq))
+               return -EINVAL;
+
        if (unlikely(!handler))
                return -EINVAL;
 
+       bucket = __bucket(real_irq);
        if (unlikely(!bucket->irq_info))
                return -ENODEV;
 
        if (irqflags & SA_SAMPLE_RANDOM) {
                /*
-                * This function might sleep, we want to call it first,
-                * outside of the atomic block. In SA_STATIC_ALLOC case,
-                * random driver's kmalloc will fail, but it is safe.
-                * If already initialized, random driver will not reinit.
-                * Yes, this might clear the entropy pool if the wrong
-                * driver is attempted to be loaded, without actually
-                * installing a new handler, but is this really a problem,
-                * only the sysadmin is able to do this.
-                */
-               rand_initialize_irq(PIL_DEVICE_IRQ);
+                * This function might sleep, so we want to call it first,
+                * outside of the atomic block.
+                * Yes, this might clear the entropy pool if the wrong
+                * driver is loaded without actually installing a new
+                * handler, but is this really a problem?  Only the
+                * sysadmin is able to do this.
+                */
+               rand_initialize_irq(virt_irq);
        }
 
        spin_lock_irqsave(&irq_action_lock, flags);
 
-       if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) {
+       if (check_irq_sharing(virt_irq, irqflags)) {
                spin_unlock_irqrestore(&irq_action_lock, flags);
                return -EBUSY;
        }
        action->name = name;
        action->next = NULL;
        action->dev_id = dev_id;
-       put_ino_in_irqaction(action, irq);
+       put_ino_in_irqaction(action, __irq_ino(real_irq));
        put_smpaff_in_irqaction(action, CPU_MASK_NONE);
 
-       append_irq_action(PIL_DEVICE_IRQ, action);
+       append_irq_action(virt_irq, action);
 
-       enable_irq(irq);
+       enable_irq(virt_irq);
 
        /* We ate the IVEC already, this makes sure it does not get lost. */
        if (pending) {
 
        spin_unlock_irqrestore(&irq_action_lock, flags);
 
-       register_irq_proc(__irq_ino(irq));
+       register_irq_proc(virt_irq);
 
 #ifdef CONFIG_SMP
        distribute_irqs();
 
 EXPORT_SYMBOL(request_irq);
 
-static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
+static struct irqaction *unlink_irq_action(unsigned int virt_irq, void *dev_id)
 {
        struct irqaction *action, **pp;
 
-       pp = irq_action + PIL_DEVICE_IRQ;
+       pp = irq_action + virt_irq;
        action = *pp;
        if (unlikely(!action))
                return NULL;
 
        if (unlikely(!action->handler)) {
-               printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ);
+               printk("Freeing free IRQ %d\n", virt_irq);
                return NULL;
        }
 
        return action;
 }
 
-void free_irq(unsigned int irq, void *dev_id)
+void free_irq(unsigned int virt_irq, void *dev_id)
 {
        struct irqaction *action;
        struct ino_bucket *bucket;
        struct irq_desc *desc;
        unsigned long flags;
+       unsigned int real_irq;
        int ent, i;
 
+       real_irq = virt_to_real_irq(virt_irq);
+       if (unlikely(!real_irq))
+               return;
+
        spin_lock_irqsave(&irq_action_lock, flags);
 
-       action = unlink_irq_action(irq, dev_id);
+       action = unlink_irq_action(virt_irq, dev_id);
 
        spin_unlock_irqrestore(&irq_action_lock, flags);
 
        if (unlikely(!action))
                return;
 
-       synchronize_irq(irq);
+       synchronize_irq(virt_irq);
 
        spin_lock_irqsave(&irq_action_lock, flags);
 
-       bucket = __bucket(irq);
+       bucket = __bucket(real_irq);
        desc = bucket->irq_info;
 
        for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
                 * the same IMAP are active.
                 */
                if (ent == NUM_IVECS)
-                       disable_irq(irq);
+                       disable_irq(virt_irq);
        }
 
        spin_unlock_irqrestore(&irq_action_lock, flags);
 EXPORT_SYMBOL(free_irq);
 
 #ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
+void synchronize_irq(unsigned int virt_irq)
 {
-       struct ino_bucket *bucket = __bucket(irq);
+       unsigned int real_irq = virt_to_real_irq(virt_irq);
+       struct ino_bucket *bucket;
+
+       if (unlikely(!real_irq))
+               return;
 
+       bucket = __bucket(real_irq);
 #if 0
        /* The following is how I wish I could implement this.
         * Unfortunately the ICLR registers are read-only, you can
 
                action_mask &= ~mask;
 
-               if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
+               if (p->handler(bp->virt_irq, p->dev_id, regs) == IRQ_HANDLED)
                        random |= p->flags;
 
                if (!action_mask)
 
        /* Test and add entropy */
        if (random & SA_SAMPLE_RANDOM)
-               add_interrupt_randomness(PIL_DEVICE_IRQ);
+               add_interrupt_randomness(bp->virt_irq);
 out:
        bp->flags &= ~IBF_INPROGRESS;
 }
        clear_softint(clr_mask);
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
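+       /* The timer is hardwired to slot 0 of irq_action[]. */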
+       kstat_this_cpu.irqs[0]++;
        timer_interrupt(irq, NULL, regs);
        irq_exit();
 }
                             : "g1");
 }
 
-static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir [NUM_IVECS];
+static struct proc_dir_entry *root_irq_dir;
+static struct proc_dir_entry *irq_dir[NR_IRQS];
 
 #ifdef CONFIG_SMP
 
-static int irq_affinity_read_proc (char *page, char **start, off_t off,
-                       int count, int *eof, void *data)
+static int irq_affinity_read_proc(char *page, char **start, off_t off,
+                                 int count, int *eof, void *data)
 {
-       struct ino_bucket *bp = ivector_table + (long)data;
+       /* 'data' now carries a virtual IRQ number, so translate it
+        * before looking up the bucket.
+        */
+       struct ino_bucket *bp = __bucket(virt_to_real_irq((long)data));
        struct irq_desc *desc = bp->irq_info;
        return len;
 }
 
-static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
+static inline void set_intr_affinity(int virt_irq, cpumask_t hw_aff)
 {
-       struct ino_bucket *bp = ivector_table + irq;
-       struct irq_desc *desc = bp->irq_info;
-       struct irqaction *ap = desc->action;
+       struct ino_bucket *bp;
+       struct irq_desc *desc;
+       struct irqaction *ap;
+       unsigned int real_irq;
+
+       real_irq = virt_to_real_irq(virt_irq);
+       if (unlikely(!real_irq))
+               return;
+
+       bp = __bucket(real_irq);
+       desc = bp->irq_info;
+       ap = desc->action;
 
        /* Users specify affinity in terms of hw cpu ids.
         * As soon as we do this, handler_irq() might see and take action.
 
        /* Migration is simply done by the next cpu to service this
         * interrupt.
+        *
+        * XXX Broken, this doesn't happen anymore...
         */
 }
 
-static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
-                                       unsigned long count, void *data)
+static int irq_affinity_write_proc(struct file *file,
+                                  const char __user *buffer,
+                                  unsigned long count, void *data)
 {
-       int irq = (long) data, full_count = count, err;
+       int virt_irq = (long) data, full_count = count, err;
        cpumask_t new_value;
 
        err = cpumask_parse(buffer, count, new_value);
        if (cpus_empty(new_value))
                return -EINVAL;
 
-       set_intr_affinity(irq, new_value);
+       set_intr_affinity(virt_irq, new_value);
 
        return full_count;
 }
 
 #define MAX_NAMELEN 10
 
-static void register_irq_proc (unsigned int irq)
+static void register_irq_proc(unsigned int virt_irq)
 {
        char name [MAX_NAMELEN];
 
-       if (!root_irq_dir || irq_dir[irq])
+       if (!root_irq_dir || irq_dir[virt_irq])
                return;
 
        memset(name, 0, MAX_NAMELEN);
-       sprintf(name, "%x", irq);
+       sprintf(name, "%d", virt_irq);
 
        /* create /proc/irq/1234 */
-       irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+       irq_dir[virt_irq] = proc_mkdir(name, root_irq_dir);
 
 #ifdef CONFIG_SMP
        /* XXX SMP affinity not supported on starfire yet. */
 
                if (entry) {
                        entry->nlink = 1;
-                       entry->data = (void *)(long)irq;
+                       entry->data = (void *)(long)virt_irq;
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }
 #endif
 }
 
-void init_irq_proc (void)
+void init_irq_proc(void)
 {
        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);
 
        }
 }
 
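+/* Compute the ICLR/IMAP register address for an INO.  The final
+ * '+ 4' selects the low 32 bits of the 64-bit register, which is
+ * what the upa_readl()/upa_writel() accessors operate on.
+ */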
+static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm,
+                                       unsigned int ino)
+{
+       ino &= PCI_IRQ_INO;
+       return pbm->pbm_regs + schizo_iclr_offset(ino) + 4;
+}
+
+static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm,
+                                       unsigned int ino)
+{
+       ino &= PCI_IRQ_INO;
+       return pbm->pbm_regs + schizo_imap_offset(ino) + 4;
+}
+
 static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
                                     struct pci_dev *pdev,
                                     unsigned int ino)
 {
-       struct ino_bucket *bucket;
        unsigned long imap, iclr;
-       unsigned long imap_off, iclr_off;
        int ign_fixup;
+       int virt_irq;
 
        ino &= PCI_IRQ_INO;
-       imap_off = schizo_imap_offset(ino);
 
        /* Now build the IRQ bucket. */
-       imap = pbm->pbm_regs + imap_off;
-       imap += 4;
-
-       iclr_off = schizo_iclr_offset(ino);
-       iclr = pbm->pbm_regs + iclr_off;
-       iclr += 4;
+       imap = schizo_ino_to_imap(pbm, ino);
+       iclr = schizo_ino_to_iclr(pbm, ino);
 
        /* On Schizo, no inofixup occurs.  This is because each
-        * INO has it's own IMAP register.  On Psycho and Sabre
+        * INO has its own IMAP register.  On Psycho and Sabre
                        ign_fixup = (1 << 6);
        }
 
-       bucket = __bucket(build_irq(ign_fixup, iclr, imap));
-       bucket->flags |= IBF_PCI;
+       virt_irq = build_irq(ign_fixup, iclr, imap, IBF_PCI);
 
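+       /* Tomatillo needs a write-sync pre-handler; attach it via
+        * the new irq_install_pre_handler() hook.
+        */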
        if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
-               struct irq_desc *p = bucket->irq_info;
-
-               p->pre_handler = tomatillo_wsync_handler;
-               p->pre_handler_arg1 = ((pbm->chip_version <= 4) ?
-                                      (void *) 1 : (void *) 0);
-               p->pre_handler_arg2 = (void *) pbm->sync_reg;
+               irq_install_pre_handler(virt_irq,
+                                       tomatillo_wsync_handler,
+                                       ((pbm->chip_version <= 4) ?
+                                        (void *) 1 : (void *) 0),
+                                       (void *) pbm->sync_reg);
        }
 
-       return __irq(bucket);
+       return virt_irq;
 }
 
 /* SCHIZO error handling support. */
 static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
 {
        struct pci_pbm_info *pbm;
-       struct ino_bucket *bucket;
        unsigned long iclr;
 
        /* Do not clear the interrupt for the other PCI bus.
        else
                pbm = &p->pbm_A;
 
-       irq = schizo_irq_build(pbm, NULL,
-                              (pbm->portid << 6) | (irq & IMAP_INO));
-       bucket = __bucket(irq);
-       iclr = bucket->iclr;
+       schizo_irq_build(pbm, NULL,
+                        (pbm->portid << 6) | (irq & IMAP_INO));
 
+       iclr = schizo_ino_to_iclr(pbm,
+                                 (pbm->portid << 6) | (irq & IMAP_INO));
        upa_writel(ICLR_IDLE, iclr);
 }
 
 {
        struct pci_pbm_info *pbm;
        unsigned int irq;
-       struct ino_bucket *bucket;
        u64 tmp, err_mask, err_no_mask;
 
        /* Build IRQs and register handlers. */
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+                                                SCHIZO_UE_INO)));
        upa_writel(tmp, (pbm->pbm_regs +
                         schizo_imap_offset(SCHIZO_UE_INO) + 4));
 
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+                                                SCHIZO_CE_INO)));
        upa_writel(tmp, (pbm->pbm_regs +
                         schizo_imap_offset(SCHIZO_CE_INO) + 4));
 
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+                                                SCHIZO_PCIERR_A_INO)));
        upa_writel(tmp, (pbm->pbm_regs +
                         schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
 
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+                                                SCHIZO_PCIERR_B_INO)));
        upa_writel(tmp, (pbm->pbm_regs +
                         schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
 
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+                                                SCHIZO_SERR_INO)));
        upa_writel(tmp, (pbm->pbm_regs +
                         schizo_imap_offset(SCHIZO_SERR_INO) + 4));
 
 {
        struct pci_pbm_info *pbm;
        unsigned int irq;
-       struct ino_bucket *bucket;
        u64 tmp, err_mask, err_no_mask;
 
        /* Build IRQs and register handlers. */
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
        upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
 
        pbm = pbm_for_ino(p, SCHIZO_CE_INO);
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
        upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
 
        pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO));
        upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
 
        pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO));
        upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
 
        pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
                            pbm->name);
                prom_halt();
        }
-       bucket = __bucket(irq);
-       tmp = upa_readl(bucket->imap);
+       tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO));
        upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
 
        /* Enable UE and CE interrupts for controller. */