/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/of_device.h>

#include <asm/prom.h>
#include <asm/irq.h>

#include "pci_impl.h"

#define DRIVER_NAME     "fire"
#define PFX             DRIVER_NAME ": "

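/* All Fire registers are addressed by physical address.  These helpers
 * wrap ldxa/stxa with ASI_PHYS_BYPASS_EC_E so every register access
 * below bypasses the MMU and hits the memory-mapped register directly.
 */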
#define fire_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define fire_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory")

#define FIRE_IOMMU_CONTROL      0x40000UL
#define FIRE_IOMMU_TSBBASE      0x40008UL
#define FIRE_IOMMU_FLUSH        0x40100UL
#define FIRE_IOMMU_FLUSHINV     0x40108UL

static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
        struct iommu *iommu = pbm->iommu;
        u32 vdma[2], dma_mask;
        u64 control;
        int tsbsize, err;

        /* No virtual-dma property on these guys, use largest size.  */
        vdma[0] = 0xc0000000; /* base */
        vdma[1] = 0x40000000; /* size */
        dma_mask = 0xffffffff;
        tsbsize = 128;

        /* Register addresses. */
        iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
        iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
        iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
        iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

        /* We use the main control/status register of FIRE as the write
         * completion register.
         */
        iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

        /*
         * Invalidate TLB Entries.
         */
        fire_write(iommu->iommu_flushinv, ~(u64)0);

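        /* tsbsize * 8 * 1024 gives the TSB size in bytes: a 1MB table
         * of 128K eight-byte IOPTEs, enough to map the whole 1GB DVMA
         * window configured above in 8KB IOMMU pages.
         */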
        err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
                               pbm->numa_node);
        if (err)
                return err;

        fire_write(iommu->iommu_tsbbase, __pa(iommu->page_table) | 0x7UL);

        control = fire_read(iommu->iommu_control);
        control |= (0x00000400 /* TSB cache snoop enable */     |
                    0x00000300 /* Cache mode */                 |
                    0x00000002 /* Bypass enable */              |
                    0x00000001 /* Translation enable */);
        fire_write(iommu->iommu_control, control);

        return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
        u64             word0;
#define MSIQ_WORD0_RESV                 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE             0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT       56
#define MSIQ_WORD0_LEN                  0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT            46
#define MSIQ_WORD0_ADDR0                0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT          32
#define MSIQ_WORD0_RID                  0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT            16
#define MSIQ_WORD0_DATA0                0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT          0

#define MSIQ_TYPE_MSG                   0x6
#define MSIQ_TYPE_MSI32                 0xb
#define MSIQ_TYPE_MSI64                 0xf

        u64             word1;
#define MSIQ_WORD1_ADDR1                0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT          16
#define MSIQ_WORD1_DATA1                0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT          0

        u64             resv[6];
};
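
/* Each event queue record is 64 bytes (word0, word1 and six reserved
 * words).  The head/tail registers below index 128 such records per
 * queue (VAL mask 0x7f), so one queue occupies 8KB -- which is where
 * the "* 8192" in pci_fire_dequeue_msi() and the 512KB allocation in
 * pci_fire_msiq_alloc() come from.
 */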

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG       0x010000UL
#define  EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)     (0x011000UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_SET_OFLOW  0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_SET_EN     0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)   (0x011200UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_CLEAR_OF   0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_E2I  0x0000800000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_DIS  0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)           (0x011400UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_STATE_MASK         0x0000000000000007UL
#define  EVENT_QUEUE_STATE_IDLE         0x0000000000000001UL
#define  EVENT_QUEUE_STATE_ACTIVE       0x0000000000000002UL
#define  EVENT_QUEUE_STATE_ERROR        0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)            (0x011600UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_TAIL_OFLOW         0x0200000000000000UL
#define  EVENT_QUEUE_TAIL_VAL           0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)            (0x011800UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_HEAD_VAL           0x000000000000007fUL

#define MSI_MAP(MSI)                    (0x020000UL + (MSI) * 0x8UL)
#define  MSI_MAP_VALID                  0x8000000000000000UL
#define  MSI_MAP_EQWR_N                 0x4000000000000000UL
#define  MSI_MAP_EQNUM                  0x000000000000003fUL

#define MSI_CLEAR(MSI)                  (0x028000UL + (MSI) * 0x8UL)
#define  MSI_CLEAR_EQWR_N               0x4000000000000000UL

#define IMONDO_DATA0                    0x02C000UL
#define  IMONDO_DATA0_DATA              0xffffffffffffffc0UL

#define IMONDO_DATA1                    0x02C008UL
#define  IMONDO_DATA1_DATA              0xffffffffffffffffUL

#define MSI_32BIT_ADDR                  0x034000UL
#define  MSI_32BIT_ADDR_VAL             0x00000000ffff0000UL

#define MSI_64BIT_ADDR                  0x034008UL
#define  MSI_64BIT_ADDR_VAL             0xffffffffffff0000UL

static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                             unsigned long *head)
{
        *head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
        return 0;
}

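/* Invoked by the generic sparc64 MSI queue code, which reads the head
 * pointer, calls this repeatedly until it returns 0, then writes the
 * head pointer back via pci_fire_set_head().
 */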
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
                                unsigned long *head, unsigned long *msi)
{
        unsigned long type_fmt, type, msi_num;
        struct pci_msiq_entry *base, *ep;

        base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
        ep = &base[*head];

        if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
                return 0;

        type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
                    MSIQ_WORD0_FMT_TYPE_SHIFT);
        type = (type_fmt >> 3);
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
                          MSIQ_WORD0_DATA0_SHIFT);

        fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
                   MSI_CLEAR_EQWR_N);

        /* Clear the entry.  */
        ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

        /* Go to next entry in ring.  */
        (*head)++;
        if (*head >= pbm->msiq_ent_count)
                *head = 0;

        return 1;
}

static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                             unsigned long head)
{
        fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
        return 0;
}

static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long msi, int is_msi64)
{
        u64 val;

        val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
        val &= ~(MSI_MAP_EQNUM);
        val |= msiqid;
        fire_write(pbm->pbm_regs + MSI_MAP(msi), val);

        fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
                   MSI_CLEAR_EQWR_N);

        val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
        val |= MSI_MAP_VALID;
        fire_write(pbm->pbm_regs + MSI_MAP(msi), val);

        return 0;
}

static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long msiqid;
        u64 val;

        val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
        msiqid = (val & MSI_MAP_EQNUM);

        val &= ~MSI_MAP_VALID;

        fire_write(pbm->pbm_regs + MSI_MAP(msi), val);

        return 0;
}

static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long pages, order, i;

        order = get_order(512 * 1024);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
                   (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
                    __pa(pbm->msi_queues)));

        fire_write(pbm->pbm_regs + IMONDO_DATA0,
                   pbm->portid << 6);
        fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);

        fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
                   pbm->msi32_start);
        fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
                   pbm->msi64_start);

        for (i = 0; i < pbm->msiq_num; i++) {
                fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
                fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
        }

        return 0;
}

static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long pages, order;

        order = get_order(512 * 1024);
        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
                                   unsigned long msiqid,
                                   unsigned long devino)
{
        unsigned long cregs = (unsigned long) pbm->pbm_regs;
        unsigned long imap_reg, iclr_reg, int_ctrlr;
        unsigned int virt_irq;
        int fixup;
        u64 val;

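        /* The IMAP/ICLR register pair for a device INO sits at fixed
         * offsets (0x1000 and 0x1400 respectively) from the PBM
         * registers, one 8-byte register per INO.
         */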
        imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
        iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

        /* XXX iterate amongst the 4 IRQ controllers XXX */
        int_ctrlr = (1UL << 6);

        val = fire_read(imap_reg);
        val |= (1UL << 63) | int_ctrlr;
        fire_write(imap_reg, val);

        fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

        virt_irq = build_irq(fixup, iclr_reg, imap_reg);
        if (!virt_irq)
                return -ENOMEM;

        fire_write(pbm->pbm_regs +
                   EVENT_QUEUE_CONTROL_SET(msiqid),
                   EVENT_QUEUE_CONTROL_SET_EN);

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
        .get_head       =       pci_fire_get_head,
        .dequeue_msi    =       pci_fire_dequeue_msi,
        .set_head       =       pci_fire_set_head,
        .msi_setup      =       pci_fire_msi_setup,
        .msi_teardown   =       pci_fire_msi_teardown,
        .msiq_alloc     =       pci_fire_msiq_alloc,
        .msiq_free      =       pci_fire_msiq_free,
        .msiq_build_irq =       pci_fire_msiq_build_irq,
};

static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL     0x470010UL
#define  FIRE_PARITY_ENAB       0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL    0x471028UL
#define  FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
#define  FIRE_FATAL_RESET_MB    0x0000000002000000UL
#define  FIRE_FATAL_RESET_CPE   0x0000000000008000UL
#define  FIRE_FATAL_RESET_APE   0x0000000000004000UL
#define  FIRE_FATAL_RESET_PIO   0x0000000000000040UL
#define  FIRE_FATAL_RESET_JW    0x0000000000000004UL
#define  FIRE_FATAL_RESET_JI    0x0000000000000002UL
#define  FIRE_FATAL_RESET_JR    0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE   0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL           0x80000UL
#define  FIRE_TLU_CTRL_TIM      0x00000000da000000UL
#define  FIRE_TLU_CTRL_QDET     0x0000000000000100UL
#define  FIRE_TLU_CTRL_CFG      0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL       0x90008UL
#define FIRE_TLU_LINK_CTRL      0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK  0x0000000000000040UL
#define FIRE_LPU_RESET          0xe2008UL
#define FIRE_LPU_LLCFG          0xe2200UL
#define  FIRE_LPU_LLCFG_VC0     0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL    0xe2240UL
#define  FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
#define  FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP      0xe2430UL
#define FIRE_LPU_LTSSM_CFG2     0xe2788UL
#define FIRE_LPU_LTSSM_CFG3     0xe2790UL
#define FIRE_LPU_LTSSM_CFG4     0xe2798UL
#define FIRE_LPU_LTSSM_CFG5     0xe27a0UL
#define FIRE_DMC_IENAB          0x31800UL
#define FIRE_DMC_DBG_SEL_A      0x53000UL
#define FIRE_DMC_DBG_SEL_B      0x53008UL
#define FIRE_PEC_IENAB          0x51800UL

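/* Basic hardware bring-up: enable parity checking and the fatal reset
 * causes, unmask the core interrupts, then program the TLU and LPU
 * blocks (transaction-layer and link-layer units, going by the register
 * names) so the link comes up with flow control and VC0 enabled.
 */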
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
        u64 val;

        fire_write(pbm->controller_regs + FIRE_PARITY_CONTROL,
                   FIRE_PARITY_ENAB);

        fire_write(pbm->controller_regs + FIRE_FATAL_RESET_CTL,
                   (FIRE_FATAL_RESET_SPARE |
                    FIRE_FATAL_RESET_MB |
                    FIRE_FATAL_RESET_CPE |
                    FIRE_FATAL_RESET_APE |
                    FIRE_FATAL_RESET_PIO |
                    FIRE_FATAL_RESET_JW |
                    FIRE_FATAL_RESET_JI |
                    FIRE_FATAL_RESET_JR));

        fire_write(pbm->controller_regs + FIRE_CORE_INTR_ENABLE, ~(u64)0);

        val = fire_read(pbm->pbm_regs + FIRE_TLU_CTRL);
        val |= (FIRE_TLU_CTRL_TIM |
                FIRE_TLU_CTRL_QDET |
                FIRE_TLU_CTRL_CFG);
        fire_write(pbm->pbm_regs + FIRE_TLU_CTRL, val);
        fire_write(pbm->pbm_regs + FIRE_TLU_DEV_CTRL, 0);
        fire_write(pbm->pbm_regs + FIRE_TLU_LINK_CTRL,
                   FIRE_TLU_LINK_CTRL_CLK);

        fire_write(pbm->pbm_regs + FIRE_LPU_RESET, 0);
        fire_write(pbm->pbm_regs + FIRE_LPU_LLCFG,
                   FIRE_LPU_LLCFG_VC0);
        fire_write(pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL,
                   (FIRE_LPU_FCTRL_UCTRL_N |
                    FIRE_LPU_FCTRL_UCTRL_P));
        fire_write(pbm->pbm_regs + FIRE_LPU_TXL_FIFOP,
                   ((0xffff << 16) | (0x0000 << 0)));
        fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2, 3000000);
        fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3, 500000);
        fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4,
                   (2 << 16) | (140 << 8));
        fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5, 0);

        fire_write(pbm->pbm_regs + FIRE_DMC_IENAB, ~(u64)0);
        fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_A, 0);
        fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_B, 0);

        fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
}

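/* One Fire chip carries two PBMs (PCI bus modules); the low bit of the
 * "portid" property selects which one this OF node describes.  The
 * shared controller registers sit 0x410000 below the per-PBM register
 * block, hence the offset subtracted from regs[1] below.
 */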
static int __init pci_fire_pbm_init(struct pci_controller_info *p,
                                    struct of_device *op, u32 portid)
{
        const struct linux_prom64_registers *regs;
        struct device_node *dp = op->node;
        struct pci_pbm_info *pbm;
        int err;

        if ((portid & 1) == 0)
                pbm = &p->pbm_A;
        else
                pbm = &p->pbm_B;

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        pbm->numa_node = -1;

        pbm->pci_ops = &sun4u_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->portid = portid;
        pbm->parent = p;
        pbm->prom_node = dp;
        pbm->name = dp->full_name;

        regs = of_get_property(dp, "reg", NULL);
        pbm->pbm_regs = regs[0].phys_addr;
        pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

        printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);

        pci_fire_hw_init(pbm);

        err = pci_fire_pbm_iommu_init(pbm);
        if (err)
                return err;

        pci_fire_msi_init(pbm);

        pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);

        /* XXX register error interrupt handlers XXX */

        return 0;
}

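/* Two PBMs belonging to the same Fire controller have port IDs that
 * differ only in the lowest bit, so x == (y ^ 1) identifies the sibling
 * of an already-probed PBM.
 */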
static inline int portid_compare(u32 x, u32 y)
{
        if (x == (y ^ 1))
                return 1;
        return 0;
}

static int __devinit fire_probe(struct of_device *op,
                                const struct of_device_id *match)
{
        struct device_node *dp = op->node;
        struct pci_controller_info *p;
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        u32 portid;
        int err;

        portid = of_getintprop_default(dp, "portid", 0xff);
        for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
                if (portid_compare(pbm->portid, portid))
                        return pci_fire_pbm_init(pbm->parent, op, portid);
        }

        err = -ENOMEM;
        p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p) {
                printk(KERN_ERR PFX "Cannot allocate controller info.\n");
                goto out_err;
        }

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu) {
                printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n");
                goto out_free_controller;
        }

        p->pbm_A.iommu = iommu;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu) {
                printk(KERN_ERR PFX "Cannot allocate PBM B iommu.\n");
                goto out_free_iommu_A;
        }

        p->pbm_B.iommu = iommu;

        return pci_fire_pbm_init(p, op, portid);

out_free_iommu_A:
        kfree(p->pbm_A.iommu);

out_free_controller:
        kfree(p);

out_err:
        return err;
}

static struct of_device_id __initdata fire_match[] = {
        {
                .name = "pci",
                .compatible = "pciex108e,80f0",
        },
        {},
};

static struct of_platform_driver fire_driver = {
        .name           = DRIVER_NAME,
        .match_table    = fire_match,
        .probe          = fire_probe,
};

static int __init fire_init(void)
{
        return of_register_driver(&fire_driver, &of_bus_type);
}

subsys_initcall(fire_init);