]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/ibm_newemac/core.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/hskinnemoen/avr32-2.6
[linux-2.6-omap-h63xx.git] / drivers / net / ibm_newemac / core.c
1 /*
2  * drivers/net/ibm_newemac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
40 #include <linux/of.h>
41
42 #include <asm/processor.h>
43 #include <asm/io.h>
44 #include <asm/dma.h>
45 #include <asm/uaccess.h>
46 #include <asm/dcr.h>
47 #include <asm/dcr-regs.h>
48
49 #include "core.h"
50
51 /*
52  * Lack of dma_unmap_???? calls is intentional.
53  *
54  * API-correct usage requires additional support state information to be
55  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56  * EMAC design (e.g. TX buffer passed from network stack can be split into
57  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58  * maintaining such information will add additional overhead.
59  * Current DMA API implementation for 4xx processors only ensures cache coherency
60  * and dma_unmap_???? routines are empty and are likely to stay this way.
61  * I decided to omit dma_unmap_??? calls because I don't want to add additional
62  * complexity just for the sake of following some abstract API, when it doesn't
 63  * add any real benefit to the driver. I understand that this decision may be
64  * controversial, but I really tried to make code API-correct and efficient
65  * at the same time and didn't come up with code I liked :(.                --ebs
66  */
67
68 #define DRV_NAME        "emac"
69 #define DRV_VERSION     "3.54"
70 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
71
72 MODULE_DESCRIPTION(DRV_DESC);
73 MODULE_AUTHOR
74     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
76
77 /*
78  * PPC64 doesn't (yet) have a cacheable_memcpy
79  */
80 #ifdef CONFIG_PPC64
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
82 #endif
83
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
86
87 /* If packet size is less than this number, we allocate small skb and copy packet
88  * contents into it instead of just sending original big skb up
89  */
90 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
91
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93  * to avoid re-using the same PHY ID in cases where the arch didn't
94  * setup precise phy_map entries
95  *
96  * XXX This is something that needs to be reworked as we can have multiple
97  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98  * probably require in that case to have explicit PHY IDs in the device-tree
99  */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
102
103 /* This is the wait queue used to wait on any event related to probe, that
104  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105  */
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107
108 /* Having stable interface names is a doomed idea. However, it would be nice
109  * if we didn't have completely random interface names at boot too :-) It's
110  * just a matter of making everybody's life easier. Since we are doing
111  * threaded probing, it's a bit harder though. The base idea here is that
112  * we make up a list of all emacs in the device-tree before we register the
113  * driver. Every emac will then wait for the previous one in the list to
114  * initialize before itself. We should also keep that list ordered by
115  * cell_index.
116  * That list is only 4 entries long, meaning that additional EMACs don't
117  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118  */
119
120 #define EMAC_BOOT_LIST_SIZE     4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
125
126 /* I don't want to litter system log with timeout errors
127  * when we have brain-damaged PHY.
128  */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
130                                              const char *error)
131 {
132         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133                                   EMAC_FTR_460EX_PHY_CLK_FIX |
134                                   EMAC_FTR_440EP_PHY_CLK_FIX))
135                 DBG(dev, "%s" NL, error);
136         else if (net_ratelimit())
137                 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
138 }
139
/* EMAC PHY clock workaround:
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	/* Set this EMAC's clock-select bit in SDR0_MFR (one ECS bit per
	 * cell_index) to source the RX clock from the TX clock. */
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
152
/* Undo emac_rx_clk_tx(): clear this EMAC's SDR0_MFR_ECS clock-select bit,
 * restoring the default RX clock source.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
161
162 /* PHY polling intervals */
163 #define PHY_POLL_LINK_ON        HZ
164 #define PHY_POLL_LINK_OFF       (HZ / 5)
165
166 /* Graceful stop timeouts in us.
167  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
168  */
169 #define STOP_TIMEOUT_10         1230
170 #define STOP_TIMEOUT_100        124
171 #define STOP_TIMEOUT_1000       13
172 #define STOP_TIMEOUT_1000_JUMBO 73
173
174 static unsigned char default_mcast_addr[] = {
175         0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
176 };
177
178 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
179 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
180         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
181         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
182         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
183         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
184         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
185         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
186         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
187         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
188         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
189         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
190         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
191         "tx_bd_excessive_collisions", "tx_bd_late_collision",
192         "tx_bd_multple_collisions", "tx_bd_single_collision",
193         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
194         "tx_errors"
195 };
196
197 static irqreturn_t emac_irq(int irq, void *dev_instance);
198 static void emac_clean_tx_ring(struct emac_instance *dev);
199 static void __emac_set_multicast_list(struct emac_instance *dev);
200
201 static inline int emac_phy_supports_gige(int phy_mode)
202 {
203         return  phy_mode == PHY_MODE_GMII ||
204                 phy_mode == PHY_MODE_RGMII ||
205                 phy_mode == PHY_MODE_TBI ||
206                 phy_mode == PHY_MODE_RTBI;
207 }
208
209 static inline int emac_phy_gpcs(int phy_mode)
210 {
211         return  phy_mode == PHY_MODE_TBI ||
212                 phy_mode == PHY_MODE_RTBI;
213 }
214
215 static inline void emac_tx_enable(struct emac_instance *dev)
216 {
217         struct emac_regs __iomem *p = dev->emacp;
218         u32 r;
219
220         DBG(dev, "tx_enable" NL);
221
222         r = in_be32(&p->mr0);
223         if (!(r & EMAC_MR0_TXE))
224                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
225 }
226
/* Synchronously disable the transmitter: clear MR0_TXE, then busy-wait
 * (up to dev->stop_timeout iterations of udelay(1)) for the TX idle bit
 * MR0_TXI before reporting a timeout.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* Poll for the channel to report idle */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
246
247 static void emac_rx_enable(struct emac_instance *dev)
248 {
249         struct emac_regs __iomem *p = dev->emacp;
250         u32 r;
251
252         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
253                 goto out;
254
255         DBG(dev, "rx_enable" NL);
256
257         r = in_be32(&p->mr0);
258         if (!(r & EMAC_MR0_RXE)) {
259                 if (unlikely(!(r & EMAC_MR0_RXI))) {
260                         /* Wait if previous async disable is still in progress */
261                         int n = dev->stop_timeout;
262                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
263                                 udelay(1);
264                                 --n;
265                         }
266                         if (unlikely(!n))
267                                 emac_report_timeout_error(dev,
268                                                           "RX disable timeout");
269                 }
270                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
271         }
272  out:
273         ;
274 }
275
/* Synchronously disable the receiver: clear MR0_RXE, then busy-wait
 * (up to dev->stop_timeout iterations of udelay(1)) for the RX idle bit
 * MR0_RXI before reporting a timeout.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		/* Poll for the channel to report idle */
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
295
/* Quiesce the netdev: block multicast list updates, disable NAPI
 * polling and stop the TX queue.  Counterpart of emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;	/* defer multicast updates while stopped */
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
307
/* Resume the netdev after emac_netif_stop(): re-allow multicast updates
 * (flushing any change that arrived while stopped), wake the TX queue
 * and re-enable NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
327
328 static inline void emac_rx_disable_async(struct emac_instance *dev)
329 {
330         struct emac_regs __iomem *p = dev->emacp;
331         u32 r;
332
333         DBG(dev, "rx_disable_async" NL);
334
335         r = in_be32(&p->mr0);
336         if (r & EMAC_MR0_RXE)
337                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
338 }
339
/* Soft-reset the MAC: stop RX/TX first (40x erratum), pulse MR0_SRST
 * and poll (bounded at 20 iterations) for it to self-clear.  On chips
 * with EMAC_FTR_460EX_PHY_CLK_FIX the clock source is switched to
 * internal around the reset.  Returns 0 on success or -ETIMEDOUT (and
 * sets dev->reset_failed) on timeout.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable internal clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

	/* SRST self-clears once the reset completes */
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	 /* Enable external clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
382
/* Rebuild and program the group-address (multicast) hash table from the
 * netdev's current multicast list.  Each address is CRC-hashed to a
 * slot; slots are accumulated in a temporary copy and the whole table
 * is then written out in one pass.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];	/* VLA sized by the chip's table width */
	struct dev_mc_list *dmi;
	int i;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	memset(gaht_temp, 0, sizeof (gaht_temp));

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int slot, reg, mask;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* CRC of the address selects a slot; map it to reg/bit */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
411
/* Translate the netdev's interface flags (promisc / allmulti / multicast
 * list size) into an EMAC Receive Mode Register value.
 */
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
		/* Too many entries for the hash table: accept all multicast */
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
434
435 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
436 {
437         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
438
439         DBG2(dev, "__emac_calc_base_mr1" NL);
440
441         switch(tx_size) {
442         case 2048:
443                 ret |= EMAC_MR1_TFS_2K;
444                 break;
445         default:
446                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
447                        dev->ndev->name, tx_size);
448         }
449
450         switch(rx_size) {
451         case 16384:
452                 ret |= EMAC_MR1_RFS_16K;
453                 break;
454         case 4096:
455                 ret |= EMAC_MR1_RFS_4K;
456                 break;
457         default:
458                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
459                        dev->ndev->name, rx_size);
460         }
461
462         return ret;
463 }
464
465 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
466 {
467         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
468                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
469
470         DBG2(dev, "__emac4_calc_base_mr1" NL);
471
472         switch(tx_size) {
473         case 4096:
474                 ret |= EMAC4_MR1_TFS_4K;
475                 break;
476         case 2048:
477                 ret |= EMAC4_MR1_TFS_2K;
478                 break;
479         default:
480                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
481                        dev->ndev->name, tx_size);
482         }
483
484         switch(rx_size) {
485         case 16384:
486                 ret |= EMAC4_MR1_RFS_16K;
487                 break;
488         case 4096:
489                 ret |= EMAC4_MR1_RFS_4K;
490                 break;
491         case 2048:
492                 ret |= EMAC4_MR1_RFS_2K;
493                 break;
494         default:
495                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
496                        dev->ndev->name, rx_size);
497         }
498
499         return ret;
500 }
501
502 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
503 {
504         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
505                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
506                 __emac_calc_base_mr1(dev, tx_size, rx_size);
507 }
508
509 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
510 {
511         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
512                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
513         else
514                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
515 }
516
517 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
518                                  unsigned int low, unsigned int high)
519 {
520         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
521                 return (low << 22) | ( (high & 0x3ff) << 6);
522         else
523                 return (low << 23) | ( (high & 0x1ff) << 7);
524 }
525
/* Program the complete MAC configuration — MR1 (mode), MAC address,
 * VLAN TPID, receive mode, FIFO thresholds, PAUSE watermarks/timer and
 * interrupt mask — to match the current PHY state in dev->phy.  With no
 * carrier the MAC is placed in internal loopback instead of being
 * reset.  Returns 0 on success or -ETIMEDOUT when the chip reset fails.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* No carrier: force full-duplex internal loopback */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* Jumbo frames: enable JPSM and allow a longer stop */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register (hash table must be programmed before
	 * enabling multicast address matching) */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}
683
684 static void emac_reinitialize(struct emac_instance *dev)
685 {
686         DBG(dev, "reinitialize" NL);
687
688         emac_netif_stop(dev);
689         if (!emac_configure(dev)) {
690                 emac_tx_enable(dev);
691                 emac_rx_enable(dev);
692         }
693         emac_netif_start(dev);
694 }
695
/* Fully reset the TX path: stop the MAC transmitter and its MAL
 * channel, reclaim all pending TX buffers, reprogram the MAC, then
 * re-enable the channel and both MAC directions.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;	/* ring now empty */

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
711
/* Workqueue handler (scheduled from emac_tx_timeout()): perform a full
 * TX reset under link_lock, but only while the interface is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
726
/* net_device tx_timeout hook: defer the heavy recovery (full TX reset)
 * to process context via the reset work item.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
735
736
737 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
738 {
739         int done = !!(stacr & EMAC_STACR_OC);
740
741         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
742                 done = !done;
743
744         return done;
745 };
746
/* Perform one MDIO register read through this EMAC's STA (management)
 * interface.  Serialized by dev->mdio_lock; routes the MDIO lines
 * through the ZMII/RGMII bridge when the chip has one.  Returns the
 * 16-bit register value on success, -ETIMEDOUT if the interface never
 * becomes idle or the read never completes, or -EREMOTEIO if the PHY
 * reports an error.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the MDIO port and the lock on every exit path */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
817
/* Perform one MDIO register write through this EMAC's STA (management)
 * interface.  Serialized by dev->mdio_lock; routes the MDIO lines
 * through the ZMII/RGMII bridge when the chip has one.  Timeouts are
 * only reported via debug prints — the write is best-effort and the
 * function returns no status.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the MDIO port and the lock on every exit path */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
878
879 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
880 {
881         struct emac_instance *dev = netdev_priv(ndev);
882         int res;
883
884         res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
885                                (u8) id, (u8) reg);
886         return res;
887 }
888
889 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
890 {
891         struct emac_instance *dev = netdev_priv(ndev);
892
893         __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
894                           (u8) id, (u8) reg, (u16) val);
895 }
896
897 /* Tx lock BH */
898 static void __emac_set_multicast_list(struct emac_instance *dev)
899 {
900         struct emac_regs __iomem *p = dev->emacp;
901         u32 rmr = emac_iff2rmr(dev->ndev);
902
903         DBG(dev, "__multicast %08x" NL, rmr);
904
905         /* I decided to relax register access rules here to avoid
906          * full EMAC reset.
907          *
908          * There is a real problem with EMAC4 core if we use MWSW_001 bit
909          * in MR1 register and do a full EMAC reset.
910          * One TX BD status update is delayed and, after EMAC reset, it
911          * never happens, resulting in TX hung (it'll be recovered by TX
912          * timeout handler eventually, but this is just gross).
913          * So we either have to do full TX reset or try to cheat here :)
914          *
915          * The only required change is to RX mode register, so I *think* all
916          * we need is just to stop RX channel. This seems to work on all
917          * tested SoCs.                                                --ebs
918          *
919          * If we need the full reset, we might just trigger the workqueue
920          * and do it async... a bit nasty but should work --BenH
921          */
922         dev->mcast_pending = 0;
923         emac_rx_disable(dev);
924         if (rmr & EMAC_RMR_MAE)
925                 emac_hash_mc(dev);
926         out_be32(&p->rmr, rmr);
927         emac_rx_enable(dev);
928 }
929
930 /* Tx lock BH */
931 static void emac_set_multicast_list(struct net_device *ndev)
932 {
933         struct emac_instance *dev = netdev_priv(ndev);
934
935         DBG(dev, "multicast" NL);
936
937         BUG_ON(!netif_running(dev->ndev));
938
939         if (dev->no_mcast) {
940                 dev->mcast_pending = 1;
941                 return;
942         }
943         __emac_set_multicast_list(dev);
944 }
945
/* Reinitialize the RX ring for a new MTU while the interface is up.
 *
 * Stops the netif and the RX channel, drops any in-flight packets,
 * marks every BD empty, and -- only when the new MTU requires larger
 * buffers -- replaces every RX skb.  Returns 0 on success or -ENOMEM if
 * an skb allocation fails; RX is restarted in both cases and on failure
 * the caller keeps the old MTU.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Drop a partially assembled scatter/gather packet, if any */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		/* A FIRST bit means an unprocessed packet started here */
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			/* NOTE(review): slots >= i still hold the old,
			 * smaller skbs here; since the caller keeps the old
			 * MTU on failure, those buffers remain big enough.
			 */
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* The +/-2 skew mirrors emac_alloc_rx_skb() -- the mapping
		 * starts 2 bytes before skb->data, data_ptr compensates.
		 */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1019
1020 /* Process ctx, rtnl_lock semaphore */
1021 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1022 {
1023         struct emac_instance *dev = netdev_priv(ndev);
1024         int ret = 0;
1025
1026         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1027                 return -EINVAL;
1028
1029         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1030
1031         if (netif_running(ndev)) {
1032                 /* Check if we really need to reinitalize RX ring */
1033                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1034                         ret = emac_resize_rx_ring(dev, new_mtu);
1035         }
1036
1037         if (!ret) {
1038                 ndev->mtu = new_mtu;
1039                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1040                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1041         }
1042
1043         return ret;
1044 }
1045
1046 static void emac_clean_tx_ring(struct emac_instance *dev)
1047 {
1048         int i;
1049
1050         for (i = 0; i < NUM_TX_BUFF; ++i) {
1051                 if (dev->tx_skb[i]) {
1052                         dev_kfree_skb(dev->tx_skb[i]);
1053                         dev->tx_skb[i] = NULL;
1054                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1055                                 ++dev->estats.tx_dropped;
1056                 }
1057                 dev->tx_desc[i].ctrl = 0;
1058                 dev->tx_desc[i].data_ptr = 0;
1059         }
1060 }
1061
1062 static void emac_clean_rx_ring(struct emac_instance *dev)
1063 {
1064         int i;
1065
1066         for (i = 0; i < NUM_RX_BUFF; ++i)
1067                 if (dev->rx_skb[i]) {
1068                         dev->rx_desc[i].ctrl = 0;
1069                         dev_kfree_skb(dev->rx_skb[i]);
1070                         dev->rx_skb[i] = NULL;
1071                         dev->rx_desc[i].data_ptr = 0;
1072                 }
1073
1074         if (dev->rx_sg_skb) {
1075                 dev_kfree_skb(dev->rx_sg_skb);
1076                 dev->rx_sg_skb = NULL;
1077         }
1078 }
1079
/* Allocate and DMA-map a fresh skb for RX ring slot @slot, then hand
 * the descriptor back to the MAL.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails (the slot is
 * left untouched in that case).
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* Reserve headroom plus 2 bytes: the mapping starts 2 bytes before
	 * skb->data and data_ptr is bumped back up by 2 to compensate.
	 */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* data_ptr/data_len must be visible before the BD is marked EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1100
1101 static void emac_print_link_status(struct emac_instance *dev)
1102 {
1103         if (netif_carrier_ok(dev->ndev))
1104                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1105                        dev->ndev->name, dev->phy.speed,
1106                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1107                        dev->phy.pause ? ", pause enabled" :
1108                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1109         else
1110                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1111 }
1112
/* Process ctx, rtnl_lock semaphore */
/* Bring the interface up: request the error IRQ, populate the RX ring,
 * start PHY link polling (when a PHY is present), then configure the
 * MAC and enable the MAL channels.  Returns 0 or a -errno.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring indices and per-open state */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Publish link_polling before scheduling the work; pairs
		 * with the smp_rmb() in emac_force_link_update().
		 */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1186
/* BHs disabled */
#if 0
/* Currently compiled out (no callers).  Decoded the speed/duplex/pause
 * configuration programmed in MR1 and reported whether it differs from
 * the cached PHY state; kept for reference.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1219
/* Periodic link poll, run as delayed work (dev->link_work).
 *
 * Rechecks PHY link state under link_lock and, on a transition,
 * reconfigures the MAC (full TX reset on link-up, reinitialize on
 * link-down), then reschedules itself at an interval that depends on
 * the link state.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed while this work was pending: do not reschedule */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		/* Link came up: pick up the new parameters and reset TX */
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		/* Link went down: stop TX and reinitialize the MAC */
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1260
/* Force the link to be re-evaluated soon by the polling work.
 *
 * Drops carrier first so the state machine sees a transition; if link
 * polling is active, the delayed work is cancelled and rescheduled at
 * the (shorter) link-down interval.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	/* Pairs with the wmb() after setting link_polling in emac_open() */
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		/* Re-check: emac_close() may have cleared link_polling
		 * while we were cancelling.
		 */
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1271
/* Process ctx, rtnl_lock semaphore */
/* Bring the interface down: stop link polling, quiesce the MAC and the
 * MAL channels, free both rings and release the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Clear link_polling first so emac_force_link_update() will not
	 * reschedule the work we are about to cancel.
	 */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
1301
1302 static inline u16 emac_tx_csum(struct emac_instance *dev,
1303                                struct sk_buff *skb)
1304 {
1305         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1306                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1307                 ++dev->stats.tx_packets_csum;
1308                 return EMAC_TX_CTRL_TAH_CSUM;
1309         }
1310         return 0;
1311 }
1312
1313 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1314 {
1315         struct emac_regs __iomem *p = dev->emacp;
1316         struct net_device *ndev = dev->ndev;
1317
1318         /* Send the packet out. If the if makes a significant perf
1319          * difference, then we can store the TMR0 value in "dev"
1320          * instead
1321          */
1322         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1323                 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1324         else
1325                 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1326
1327         if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1328                 netif_stop_queue(ndev);
1329                 DBG2(dev, "stopped TX queue" NL);
1330         }
1331
1332         ndev->trans_start = jiffies;
1333         ++dev->stats.tx_packets;
1334         dev->stats.tx_bytes += len;
1335
1336         return 0;
1337 }
1338
/* Tx lock BH */
/* Hard-start transmit for the non-scatter/gather case: the whole skb
 * goes out as a single descriptor.  Always accepts the packet.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	/* Claim the next slot; the last slot in the ring gets WRAP */
	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* Descriptor fields must be visible before READY is set via ctrl */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1367
/* Split one DMA-mapped buffer (@pd, @len bytes) into MAL_MAX_TX_SIZE
 * sized chunks placed in consecutive TX descriptors following @slot.
 *
 * @last: this buffer is the final one of the frame; its last chunk gets
 * MAL_TX_CTRL_LAST.  Returns the index of the last descriptor used.
 * The caller is responsible for ring-space checks and for setting READY
 * on the frame's first descriptor afterwards.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached to intermediate slots; the caller hangs
		 * the skb on the frame's final slot.
		 */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1396
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Hard-start transmit supporting fragmented skbs and frames larger than
 * MAL_MAX_TX_SIZE, splitting them over several descriptors via
 * emac_xmit_split().
 *
 * Returns 0 when queued, or 1 (after stopping the queue) when the ring
 * is too full; the stack will retry the packet later.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	/* From here on, len is the linear part only */
	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	/* READY is included here, but the first descriptor's ctrl is only
	 * written at the very end (after wmb) so the hardware cannot start
	 * on a half-built frame.
	 */
	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check ring space for each fragment's chunks */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All later descriptors are set up; now make the first one READY */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
1480
1481 /* Tx lock BHs */
1482 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1483 {
1484         struct emac_error_stats *st = &dev->estats;
1485
1486         DBG(dev, "BD TX error %04x" NL, ctrl);
1487
1488         ++st->tx_bd_errors;
1489         if (ctrl & EMAC_TX_ST_BFCS)
1490                 ++st->tx_bd_bad_fcs;
1491         if (ctrl & EMAC_TX_ST_LCS)
1492                 ++st->tx_bd_carrier_loss;
1493         if (ctrl & EMAC_TX_ST_ED)
1494                 ++st->tx_bd_excessive_deferral;
1495         if (ctrl & EMAC_TX_ST_EC)
1496                 ++st->tx_bd_excessive_collisions;
1497         if (ctrl & EMAC_TX_ST_LC)
1498                 ++st->tx_bd_late_collision;
1499         if (ctrl & EMAC_TX_ST_MC)
1500                 ++st->tx_bd_multple_collisions;
1501         if (ctrl & EMAC_TX_ST_SC)
1502                 ++st->tx_bd_single_collision;
1503         if (ctrl & EMAC_TX_ST_UR)
1504                 ++st->tx_bd_underrun;
1505         if (ctrl & EMAC_TX_ST_SQE)
1506                 ++st->tx_bd_sqe;
1507 }
1508
/* Reclaim completed TX descriptors.
 *
 * Walks the ring from ack_slot, freeing skbs for descriptors the
 * hardware has released (READY cleared), accounting BD-level errors,
 * and waking the TX queue once usage drops below the wakeup threshold.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs use a different set of TX error bits */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split frames carry the skb only on their final
			 * descriptor; intermediate slots hold NULL.
			 */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1554
/* Return RX slot @slot's existing skb to the MAL for reuse.
 *
 * When @len is non-zero the buffer is re-mapped for device access.
 * dma_unmap is intentionally never called in this driver -- see the
 * comment at the top of the file.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	/* The -2/+2 skew mirrors the mapping done in emac_alloc_rx_skb() */
	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* data_len must be visible before the BD is marked EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1571
1572 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1573 {
1574         struct emac_error_stats *st = &dev->estats;
1575
1576         DBG(dev, "BD RX error %04x" NL, ctrl);
1577
1578         ++st->rx_bd_errors;
1579         if (ctrl & EMAC_RX_ST_OE)
1580                 ++st->rx_bd_overrun;
1581         if (ctrl & EMAC_RX_ST_BP)
1582                 ++st->rx_bd_bad_packet;
1583         if (ctrl & EMAC_RX_ST_RP)
1584                 ++st->rx_bd_runt_packet;
1585         if (ctrl & EMAC_RX_ST_SE)
1586                 ++st->rx_bd_short_event;
1587         if (ctrl & EMAC_RX_ST_AE)
1588                 ++st->rx_bd_alignment_error;
1589         if (ctrl & EMAC_RX_ST_BFCS)
1590                 ++st->rx_bd_bad_fcs;
1591         if (ctrl & EMAC_RX_ST_PTL)
1592                 ++st->rx_bd_packet_too_long;
1593         if (ctrl & EMAC_RX_ST_ORE)
1594                 ++st->rx_bd_out_of_range;
1595         if (ctrl & EMAC_RX_ST_IRE)
1596                 ++st->rx_bd_in_range;
1597 }
1598
/* Flag @skb as checksum-verified when a TAH is present and the
 * descriptor reported no error (@ctrl is the masked RX status; 0 means
 * clean).  No-op when built without CONFIG_IBM_NEW_EMAC_TAH.
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1609
/* Append RX slot @slot's data to the scatter/gather packet being
 * assembled in dev->rx_sg_skb.
 *
 * Returns 0 on success; -1 when no SG packet is in progress or the
 * combined length would not fit the skb (the partial packet is then
 * dropped and counted in rx_dropped_mtu).  The slot is recycled in
 * every case.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		/* +2 accounts for the alignment skew of the RX buffers */
		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1631
/* NAPI poll context */
/*
 * Receive up to @budget frames from the MAL RX ring.
 *
 * Walks the descriptor ring starting at dev->rx_slot, handing completed
 * frames to the stack.  Single-descriptor frames take the fast path;
 * multi-descriptor (scatter/gather) frames are accumulated in
 * dev->rx_sg_skb via the "sg" path below.  Returns the number of
 * descriptors consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* EMPTY means the hardware still owns this descriptor */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Barrier so data_len is not read before the ctrl word that
		 * published the descriptor as complete */
		mb();
		len = dev->rx_desc[slot].data_len;

		/* Multi-descriptor frames take the scatter/gather slow path */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		/* Drop bad frames; a TAH checksum failure alone is tolerated
		 * here (the skb is still delivered, without CHECKSUM offload) */
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Runt frames (shorter than an Ethernet header) are dropped */
		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small frames are copied into a fresh skb so the large RX
		 * buffer can be recycled immediately; the +/-2 keeps the two
		 * bytes in front of the packet data in the copy as well */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Scatter/gather: FIRST starts a new partial skb, subsequent
		 * descriptors are appended until LAST completes the frame */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* If the channel was stopped and we still have budget, either keep
	 * polling frames that arrived meanwhile, or drop any partial SG
	 * frame and restart the RX channel from slot 0 */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1763
1764 /* NAPI poll context */
1765 static int emac_peek_rx(void *param)
1766 {
1767         struct emac_instance *dev = param;
1768
1769         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1770 }
1771
1772 /* NAPI poll context */
1773 static int emac_peek_rx_sg(void *param)
1774 {
1775         struct emac_instance *dev = param;
1776
1777         int slot = dev->rx_slot;
1778         while (1) {
1779                 u16 ctrl = dev->rx_desc[slot].ctrl;
1780                 if (ctrl & MAL_RX_CTRL_EMPTY)
1781                         return 0;
1782                 else if (ctrl & MAL_RX_CTRL_LAST)
1783                         return 1;
1784
1785                 slot = (slot + 1) % NUM_RX_BUFF;
1786
1787                 /* I'm just being paranoid here :) */
1788                 if (unlikely(slot == dev->rx_slot))
1789                         return 0;
1790         }
1791 }
1792
1793 /* Hard IRQ */
1794 static void emac_rxde(void *param)
1795 {
1796         struct emac_instance *dev = param;
1797
1798         ++dev->estats.rx_stopped;
1799         emac_rx_disable_async(dev);
1800 }
1801
/* Hard IRQ */
/*
 * EMAC error interrupt handler: read and acknowledge the interrupt
 * status register, then bump the matching error counters.  All the
 * conditions reported here are error/exception events; normal TX/RX
 * completion is handled through the MAL, not this IRQ.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Writing the status value back presumably acknowledges
	 * (clears) the latched bits — write-one-to-clear register */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	/* EMAC4-only parity/underrun/overrun indications */
	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	/* Common RX error events */
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	/* TX error events */
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1852
/*
 * ndo get_stats hook: fold the driver's detailed 64-bit counters
 * (dev->stats / dev->estats) into the "legacy" struct net_device_stats
 * snapshot kept in dev->nstats.  Taken under dev->lock so the counters
 * are read consistently with respect to the IRQ handler.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	/* All the distinct drop causes collapse into one counter */
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	/* Length errors: both descriptor-reported and ISR-reported causes */
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1905
/* MAL client callbacks for the normal (single-descriptor) RX mode */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1912
/* MAL client callbacks for scatter/gather RX: only peek_rx differs, so
 * polling is deferred until a complete (LAST) frame is present */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1919
/* Ethtool support */
/*
 * Report current link settings.  The static capabilities are filled
 * lock-free; the fields that the link monitor can change concurrently
 * (advertising/autoneg/speed/duplex) are read under link_lock.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* A negative address means the PHY-less/internal configuration */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
1941
1942 static int emac_ethtool_set_settings(struct net_device *ndev,
1943                                      struct ethtool_cmd *cmd)
1944 {
1945         struct emac_instance *dev = netdev_priv(ndev);
1946         u32 f = dev->phy.features;
1947
1948         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1949             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1950
1951         /* Basic sanity checks */
1952         if (dev->phy.address < 0)
1953                 return -EOPNOTSUPP;
1954         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1955                 return -EINVAL;
1956         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1957                 return -EINVAL;
1958         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1959                 return -EINVAL;
1960
1961         if (cmd->autoneg == AUTONEG_DISABLE) {
1962                 switch (cmd->speed) {
1963                 case SPEED_10:
1964                         if (cmd->duplex == DUPLEX_HALF
1965                             && !(f & SUPPORTED_10baseT_Half))
1966                                 return -EINVAL;
1967                         if (cmd->duplex == DUPLEX_FULL
1968                             && !(f & SUPPORTED_10baseT_Full))
1969                                 return -EINVAL;
1970                         break;
1971                 case SPEED_100:
1972                         if (cmd->duplex == DUPLEX_HALF
1973                             && !(f & SUPPORTED_100baseT_Half))
1974                                 return -EINVAL;
1975                         if (cmd->duplex == DUPLEX_FULL
1976                             && !(f & SUPPORTED_100baseT_Full))
1977                                 return -EINVAL;
1978                         break;
1979                 case SPEED_1000:
1980                         if (cmd->duplex == DUPLEX_HALF
1981                             && !(f & SUPPORTED_1000baseT_Half))
1982                                 return -EINVAL;
1983                         if (cmd->duplex == DUPLEX_FULL
1984                             && !(f & SUPPORTED_1000baseT_Full))
1985                                 return -EINVAL;
1986                         break;
1987                 default:
1988                         return -EINVAL;
1989                 }
1990
1991                 mutex_lock(&dev->link_lock);
1992                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1993                                                 cmd->duplex);
1994                 mutex_unlock(&dev->link_lock);
1995
1996         } else {
1997                 if (!(f & SUPPORTED_Autoneg))
1998                         return -EINVAL;
1999
2000                 mutex_lock(&dev->link_lock);
2001                 dev->phy.def->ops->setup_aneg(&dev->phy,
2002                                               (cmd->advertising & f) |
2003                                               (dev->phy.advertising &
2004                                                (ADVERTISED_Pause |
2005                                                 ADVERTISED_Asym_Pause)));
2006                 mutex_unlock(&dev->link_lock);
2007         }
2008         emac_force_link_update(dev);
2009
2010         return 0;
2011 }
2012
2013 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2014                                        struct ethtool_ringparam *rp)
2015 {
2016         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2017         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2018 }
2019
2020 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2021                                         struct ethtool_pauseparam *pp)
2022 {
2023         struct emac_instance *dev = netdev_priv(ndev);
2024
2025         mutex_lock(&dev->link_lock);
2026         if ((dev->phy.features & SUPPORTED_Autoneg) &&
2027             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2028                 pp->autoneg = 1;
2029
2030         if (dev->phy.duplex == DUPLEX_FULL) {
2031                 if (dev->phy.pause)
2032                         pp->rx_pause = pp->tx_pause = 1;
2033                 else if (dev->phy.asym_pause)
2034                         pp->tx_pause = 1;
2035         }
2036         mutex_unlock(&dev->link_lock);
2037 }
2038
2039 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2040 {
2041         struct emac_instance *dev = netdev_priv(ndev);
2042
2043         return dev->tah_dev != NULL;
2044 }
2045
2046 static int emac_get_regs_len(struct emac_instance *dev)
2047 {
2048         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2049                 return sizeof(struct emac_ethtool_regs_subhdr) +
2050                         EMAC4_ETHTOOL_REGS_SIZE(dev);
2051         else
2052                 return sizeof(struct emac_ethtool_regs_subhdr) +
2053                         EMAC_ETHTOOL_REGS_SIZE(dev);
2054 }
2055
2056 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2057 {
2058         struct emac_instance *dev = netdev_priv(ndev);
2059         int size;
2060
2061         size = sizeof(struct emac_ethtool_regs_hdr) +
2062                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2063         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2064                 size += zmii_get_regs_len(dev->zmii_dev);
2065         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2066                 size += rgmii_get_regs_len(dev->rgmii_dev);
2067         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2068                 size += tah_get_regs_len(dev->tah_dev);
2069
2070         return size;
2071 }
2072
2073 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2074 {
2075         struct emac_ethtool_regs_subhdr *hdr = buf;
2076
2077         hdr->index = dev->cell_index;
2078         if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2079                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2080                 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2081                 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2082         } else {
2083                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2084                 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2085                 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2086         }
2087 }
2088
/*
 * Fill the ethtool register-dump buffer: a header describing which
 * optional components are present, followed by the MAL, EMAC and any
 * ZMII/RGMII/TAH register blocks in that order.  The caller sizes @buf
 * via emac_ethtool_get_regs_len().
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Each *_dump_regs() returns the position past what it wrote */
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2113
2114 static int emac_ethtool_nway_reset(struct net_device *ndev)
2115 {
2116         struct emac_instance *dev = netdev_priv(ndev);
2117         int res = 0;
2118
2119         DBG(dev, "nway_reset" NL);
2120
2121         if (dev->phy.address < 0)
2122                 return -EOPNOTSUPP;
2123
2124         mutex_lock(&dev->link_lock);
2125         if (!dev->phy.autoneg) {
2126                 res = -EINVAL;
2127                 goto out;
2128         }
2129
2130         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2131  out:
2132         mutex_unlock(&dev->link_lock);
2133         emac_force_link_update(dev);
2134         return res;
2135 }
2136
2137 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2138 {
2139         return EMAC_ETHTOOL_STATS_COUNT;
2140 }
2141
2142 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2143                                      u8 * buf)
2144 {
2145         if (stringset == ETH_SS_STATS)
2146                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2147 }
2148
2149 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2150                                            struct ethtool_stats *estats,
2151                                            u64 * tmp_stats)
2152 {
2153         struct emac_instance *dev = netdev_priv(ndev);
2154
2155         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2156         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2157         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2158 }
2159
2160 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2161                                      struct ethtool_drvinfo *info)
2162 {
2163         struct emac_instance *dev = netdev_priv(ndev);
2164
2165         strcpy(info->driver, "ibm_emac");
2166         strcpy(info->version, DRV_VERSION);
2167         info->fw_version[0] = '\0';
2168         sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2169                 dev->cell_index, dev->ofdev->node->full_name);
2170         info->n_stats = emac_ethtool_get_stats_count(ndev);
2171         info->regdump_len = emac_ethtool_get_regs_len(ndev);
2172 }
2173
/* ethtool operations table; ops not listed here (e.g. set_ringparam,
 * set_pauseparam) are unsupported */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	/* Generic helpers for link state and TX csum/SG flags */
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2197
/*
 * MII ioctl handler.  Supports the standard SIOCxMIIxxx requests plus
 * the legacy SIOCDEVPRIVATE equivalents.  @data points into ifr_ifru,
 * laid out as the MII ioctl convention: data[0] = phy id,
 * data[1] = register number, data[2] = value to write,
 * data[3] = value read.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	/* No MDIO access in the PHY-less configuration */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		/* Note: always reads from our own PHY address, not data[0] */
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		/* Register writes require admin capability */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2228
/* One dependency of an EMAC instance on another OF device (MAL, ZMII,
 * RGMII, TAH, MDIO or the previous EMAC in the boot list).  The fields
 * are resolved incrementally by emac_check_deps(). */
struct emac_depentry {
	u32                     phandle;	/* OF phandle, 0 = no dependency */
	struct device_node      *node;		/* resolved device-tree node */
	struct of_device        *ofdev;		/* resolved OF device */
	void                    *drvdata;	/* non-NULL once its driver bound */
};

/* Indices into the dependency array used by emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
2243
/*
 * Try to resolve every dependency in @deps one step further (phandle ->
 * node -> ofdev -> drvdata).  Returns non-zero once ALL dependencies are
 * satisfied.  Called repeatedly from the wait loop in emac_wait_deps(),
 * so each call only fills in whatever became available since last time.
 * Note: the node/ofdev references taken here are released by the caller.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				/* previous EMAC vanished: drop the dep */
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		/* drvdata set means the dependency's driver has bound */
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
2282
/* Drop the of_device references taken when the dependencies were
 * resolved (see emac_wait_deps()) */
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
2296
2297 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2298                                         unsigned long action, void *data)
2299 {
2300         /* We are only intereted in device addition */
2301         if (action == BUS_NOTIFY_BOUND_DRIVER)
2302                 wake_up_all(&emac_probe_wait);
2303         return 0;
2304 }
2305
/* Registered on the OF platform bus only for the duration of
 * emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2309
/*
 * Wait (with timeout) until all of this EMAC's dependencies have had
 * their drivers bound, using a bus notifier to re-check on every new
 * binding.  On success the resolved of_device pointers are transferred
 * into @dev (references kept; released later by emac_put_deps()); on
 * failure every reference taken during the wait is dropped.
 * Returns 0 on success, -ENODEV on timeout.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Fake phandle marks a dependency on the previous EMAC in the
	 * boot list; emac_check_deps() resolves it specially */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* node refs are always dropped; ofdev refs only on error
		 * (on success they are handed over to @dev below) */
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The previous-EMAC reference is never kept */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2349
2350 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2351                                          u32 *val, int fatal)
2352 {
2353         int len;
2354         const u32 *prop = of_get_property(np, name, &len);
2355         if (prop == NULL || len < sizeof(u32)) {
2356                 if (fatal)
2357                         printk(KERN_ERR "%s: missing %s property\n",
2358                                np->full_name, name);
2359                 return -ENODEV;
2360         }
2361         *val = *prop;
2362         return 0;
2363 }
2364
2365 static int __devinit emac_init_phy(struct emac_instance *dev)
2366 {
2367         struct device_node *np = dev->ofdev->node;
2368         struct net_device *ndev = dev->ndev;
2369         u32 phy_map, adv;
2370         int i;
2371
2372         dev->phy.dev = ndev;
2373         dev->phy.mode = dev->phy_mode;
2374
2375         /* PHY-less configuration.
2376          * XXX I probably should move these settings to the dev tree
2377          */
2378         if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2379                 emac_reset(dev);
2380
2381                 /* PHY-less configuration.
2382                  * XXX I probably should move these settings to the dev tree
2383                  */
2384                 dev->phy.address = -1;
2385                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2386                 dev->phy.pause = 1;
2387
2388                 return 0;
2389         }
2390
2391         mutex_lock(&emac_phy_map_lock);
2392         phy_map = dev->phy_map | busy_phy_map;
2393
2394         DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2395
2396         dev->phy.mdio_read = emac_mdio_read;
2397         dev->phy.mdio_write = emac_mdio_write;
2398
2399         /* Enable internal clock source */
2400 #ifdef CONFIG_PPC_DCR_NATIVE
2401         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2402                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2403 #endif
2404         /* PHY clock workaround */
2405         emac_rx_clk_tx(dev);
2406
2407         /* Enable internal clock source on 440GX*/
2408 #ifdef CONFIG_PPC_DCR_NATIVE
2409         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2410                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2411 #endif
2412         /* Configure EMAC with defaults so we can at least use MDIO
2413          * This is needed mostly for 440GX
2414          */
2415         if (emac_phy_gpcs(dev->phy.mode)) {
2416                 /* XXX
2417                  * Make GPCS PHY address equal to EMAC index.
2418                  * We probably should take into account busy_phy_map
2419                  * and/or phy_map here.
2420                  *
2421                  * Note that the busy_phy_map is currently global
2422                  * while it should probably be per-ASIC...
2423                  */
2424                 dev->phy.address = dev->cell_index;
2425         }
2426
2427         emac_configure(dev);
2428
2429         if (dev->phy_address != 0xffffffff)
2430                 phy_map = ~(1 << dev->phy_address);
2431
2432         for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2433                 if (!(phy_map & 1)) {
2434                         int r;
2435                         busy_phy_map |= 1 << i;
2436
2437                         /* Quick check if there is a PHY at the address */
2438                         r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2439                         if (r == 0xffff || r < 0)
2440                                 continue;
2441                         if (!emac_mii_phy_probe(&dev->phy, i))
2442                                 break;
2443                 }
2444
2445         /* Enable external clock source */
2446 #ifdef CONFIG_PPC_DCR_NATIVE
2447         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2448                 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2449 #endif
2450         mutex_unlock(&emac_phy_map_lock);
2451         if (i == 0x20) {
2452                 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2453                 return -ENXIO;
2454         }
2455
2456         /* Init PHY */
2457         if (dev->phy.def->ops->init)
2458                 dev->phy.def->ops->init(&dev->phy);
2459
2460         /* Disable any PHY features not supported by the platform */
2461         dev->phy.def->features &= ~dev->phy_feat_exc;
2462
2463         /* Setup initial link parameters */
2464         if (dev->phy.features & SUPPORTED_Autoneg) {
2465                 adv = dev->phy.features;
2466                 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2467                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2468                 /* Restart autonegotiation */
2469                 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2470         } else {
2471                 u32 f = dev->phy.def->features;
2472                 int speed = SPEED_10, fd = DUPLEX_HALF;
2473
2474                 /* Select highest supported speed/duplex */
2475                 if (f & SUPPORTED_1000baseT_Full) {
2476                         speed = SPEED_1000;
2477                         fd = DUPLEX_FULL;
2478                 } else if (f & SUPPORTED_1000baseT_Half)
2479                         speed = SPEED_1000;
2480                 else if (f & SUPPORTED_100baseT_Full) {
2481                         speed = SPEED_100;
2482                         fd = DUPLEX_FULL;
2483                 } else if (f & SUPPORTED_100baseT_Half)
2484                         speed = SPEED_100;
2485                 else if (f & SUPPORTED_10baseT_Full)
2486                         fd = DUPLEX_FULL;
2487
2488                 /* Force link parameters */
2489                 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2490         }
2491         return 0;
2492 }
2493
2494 static int __devinit emac_init_config(struct emac_instance *dev)
2495 {
2496         struct device_node *np = dev->ofdev->node;
2497         const void *p;
2498         unsigned int plen;
2499         const char *pm, *phy_modes[] = {
2500                 [PHY_MODE_NA] = "",
2501                 [PHY_MODE_MII] = "mii",
2502                 [PHY_MODE_RMII] = "rmii",
2503                 [PHY_MODE_SMII] = "smii",
2504                 [PHY_MODE_RGMII] = "rgmii",
2505                 [PHY_MODE_TBI] = "tbi",
2506                 [PHY_MODE_GMII] = "gmii",
2507                 [PHY_MODE_RTBI] = "rtbi",
2508                 [PHY_MODE_SGMII] = "sgmii",
2509         };
2510
2511         /* Read config from device-tree */
2512         if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2513                 return -ENXIO;
2514         if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2515                 return -ENXIO;
2516         if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2517                 return -ENXIO;
2518         if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2519                 return -ENXIO;
2520         if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2521                 dev->max_mtu = 1500;
2522         if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2523                 dev->rx_fifo_size = 2048;
2524         if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2525                 dev->tx_fifo_size = 2048;
2526         if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2527                 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2528         if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2529                 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2530         if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2531                 dev->phy_address = 0xffffffff;
2532         if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2533                 dev->phy_map = 0xffffffff;
2534         if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2535                 return -ENXIO;
2536         if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2537                 dev->tah_ph = 0;
2538         if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2539                 dev->tah_port = 0;
2540         if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2541                 dev->mdio_ph = 0;
2542         if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2543                 dev->zmii_ph = 0;;
2544         if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2545                 dev->zmii_port = 0xffffffff;;
2546         if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2547                 dev->rgmii_ph = 0;;
2548         if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2549                 dev->rgmii_port = 0xffffffff;;
2550         if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2551                 dev->fifo_entry_size = 16;
2552         if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2553                 dev->mal_burst_size = 256;
2554
2555         /* PHY mode needs some decoding */
2556         dev->phy_mode = PHY_MODE_NA;
2557         pm = of_get_property(np, "phy-mode", &plen);
2558         if (pm != NULL) {
2559                 int i;
2560                 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2561                         if (!strcasecmp(pm, phy_modes[i])) {
2562                                 dev->phy_mode = i;
2563                                 break;
2564                         }
2565         }
2566
2567         /* Backward compat with non-final DT */
2568         if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2569                 u32 nmode = *(const u32 *)pm;
2570                 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2571                         dev->phy_mode = nmode;
2572         }
2573
2574         /* Check EMAC version */
2575         if (of_device_is_compatible(np, "ibm,emac4sync")) {
2576                 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2577                 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2578                     of_device_is_compatible(np, "ibm,emac-460gt"))
2579                         dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2580         } else if (of_device_is_compatible(np, "ibm,emac4")) {
2581                 dev->features |= EMAC_FTR_EMAC4;
2582                 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2583                         dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2584         } else {
2585                 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2586                     of_device_is_compatible(np, "ibm,emac-440gr"))
2587                         dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2588         }
2589
2590         /* Fixup some feature bits based on the device tree */
2591         if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2592                 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2593         if (of_get_property(np, "has-new-stacr-staopc", NULL))
2594                 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2595
2596         /* CAB lacks the appropriate properties */
2597         if (of_device_is_compatible(np, "ibm,emac-axon"))
2598                 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2599                         EMAC_FTR_STACR_OC_INVERT;
2600
2601         /* Enable TAH/ZMII/RGMII features as found */
2602         if (dev->tah_ph != 0) {
2603 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2604                 dev->features |= EMAC_FTR_HAS_TAH;
2605 #else
2606                 printk(KERN_ERR "%s: TAH support not enabled !\n",
2607                        np->full_name);
2608                 return -ENXIO;
2609 #endif
2610         }
2611
2612         if (dev->zmii_ph != 0) {
2613 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2614                 dev->features |= EMAC_FTR_HAS_ZMII;
2615 #else
2616                 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2617                        np->full_name);
2618                 return -ENXIO;
2619 #endif
2620         }
2621
2622         if (dev->rgmii_ph != 0) {
2623 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2624                 dev->features |= EMAC_FTR_HAS_RGMII;
2625 #else
2626                 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2627                        np->full_name);
2628                 return -ENXIO;
2629 #endif
2630         }
2631
2632         /* Read MAC-address */
2633         p = of_get_property(np, "local-mac-address", NULL);
2634         if (p == NULL) {
2635                 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2636                        np->full_name);
2637                 return -ENXIO;
2638         }
2639         memcpy(dev->ndev->dev_addr, p, 6);
2640
2641         /* IAHT and GAHT filter parameterization */
2642         if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2643                 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2644                 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2645         } else {
2646                 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2647                 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2648         }
2649
2650         DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2651         DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2652         DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2653         DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2654         DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2655
2656         return 0;
2657 }
2658
2659 static int __devinit emac_probe(struct of_device *ofdev,
2660                                 const struct of_device_id *match)
2661 {
2662         struct net_device *ndev;
2663         struct emac_instance *dev;
2664         struct device_node *np = ofdev->node;
2665         struct device_node **blist = NULL;
2666         int err, i;
2667
2668         /* Skip unused/unwired EMACS.  We leave the check for an unused
2669          * property here for now, but new flat device trees should set a
2670          * status property to "disabled" instead.
2671          */
2672         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2673                 return -ENODEV;
2674
2675         /* Find ourselves in the bootlist if we are there */
2676         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2677                 if (emac_boot_list[i] == np)
2678                         blist = &emac_boot_list[i];
2679
2680         /* Allocate our net_device structure */
2681         err = -ENOMEM;
2682         ndev = alloc_etherdev(sizeof(struct emac_instance));
2683         if (!ndev) {
2684                 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2685                        np->full_name);
2686                 goto err_gone;
2687         }
2688         dev = netdev_priv(ndev);
2689         dev->ndev = ndev;
2690         dev->ofdev = ofdev;
2691         dev->blist = blist;
2692         SET_NETDEV_DEV(ndev, &ofdev->dev);
2693
2694         /* Initialize some embedded data structures */
2695         mutex_init(&dev->mdio_lock);
2696         mutex_init(&dev->link_lock);
2697         spin_lock_init(&dev->lock);
2698         INIT_WORK(&dev->reset_work, emac_reset_work);
2699
2700         /* Init various config data based on device-tree */
2701         err = emac_init_config(dev);
2702         if (err != 0)
2703                 goto err_free;
2704
2705         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2706         dev->emac_irq = irq_of_parse_and_map(np, 0);
2707         dev->wol_irq = irq_of_parse_and_map(np, 1);
2708         if (dev->emac_irq == NO_IRQ) {
2709                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2710                 goto err_free;
2711         }
2712         ndev->irq = dev->emac_irq;
2713
2714         /* Map EMAC regs */
2715         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2716                 printk(KERN_ERR "%s: Can't get registers address\n",
2717                        np->full_name);
2718                 goto err_irq_unmap;
2719         }
2720         // TODO : request_mem_region
2721         dev->emacp = ioremap(dev->rsrc_regs.start,
2722                              dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2723         if (dev->emacp == NULL) {
2724                 printk(KERN_ERR "%s: Can't map device registers!\n",
2725                        np->full_name);
2726                 err = -ENOMEM;
2727                 goto err_irq_unmap;
2728         }
2729
2730         /* Wait for dependent devices */
2731         err = emac_wait_deps(dev);
2732         if (err) {
2733                 printk(KERN_ERR
2734                        "%s: Timeout waiting for dependent devices\n",
2735                        np->full_name);
2736                 /*  display more info about what's missing ? */
2737                 goto err_reg_unmap;
2738         }
2739         dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2740         if (dev->mdio_dev != NULL)
2741                 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2742
2743         /* Register with MAL */
2744         dev->commac.ops = &emac_commac_ops;
2745         dev->commac.dev = dev;
2746         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2747         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2748         err = mal_register_commac(dev->mal, &dev->commac);
2749         if (err) {
2750                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2751                        np->full_name, dev->mal_dev->node->full_name);
2752                 goto err_rel_deps;
2753         }
2754         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2755         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2756
2757         /* Get pointers to BD rings */
2758         dev->tx_desc =
2759             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2760         dev->rx_desc =
2761             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2762
2763         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2764         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2765
2766         /* Clean rings */
2767         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2768         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2769         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2770         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2771
2772         /* Attach to ZMII, if needed */
2773         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2774             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2775                 goto err_unreg_commac;
2776
2777         /* Attach to RGMII, if needed */
2778         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2779             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2780                 goto err_detach_zmii;
2781
2782         /* Attach to TAH, if needed */
2783         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2784             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2785                 goto err_detach_rgmii;
2786
2787         /* Set some link defaults before we can find out real parameters */
2788         dev->phy.speed = SPEED_100;
2789         dev->phy.duplex = DUPLEX_FULL;
2790         dev->phy.autoneg = AUTONEG_DISABLE;
2791         dev->phy.pause = dev->phy.asym_pause = 0;
2792         dev->stop_timeout = STOP_TIMEOUT_100;
2793         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2794
2795         /* Find PHY if any */
2796         err = emac_init_phy(dev);
2797         if (err != 0)
2798                 goto err_detach_tah;
2799
2800         /* Fill in the driver function table */
2801         ndev->open = &emac_open;
2802         if (dev->tah_dev)
2803                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2804         ndev->tx_timeout = &emac_tx_timeout;
2805         ndev->watchdog_timeo = 5 * HZ;
2806         ndev->stop = &emac_close;
2807         ndev->get_stats = &emac_stats;
2808         ndev->set_multicast_list = &emac_set_multicast_list;
2809         ndev->do_ioctl = &emac_ioctl;
2810         if (emac_phy_supports_gige(dev->phy_mode)) {
2811                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2812                 ndev->change_mtu = &emac_change_mtu;
2813                 dev->commac.ops = &emac_commac_sg_ops;
2814         } else {
2815                 ndev->hard_start_xmit = &emac_start_xmit;
2816         }
2817         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2818
2819         netif_carrier_off(ndev);
2820         netif_stop_queue(ndev);
2821
2822         err = register_netdev(ndev);
2823         if (err) {
2824                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2825                        np->full_name, err);
2826                 goto err_detach_tah;
2827         }
2828
2829         /* Set our drvdata last as we don't want them visible until we are
2830          * fully initialized
2831          */
2832         wmb();
2833         dev_set_drvdata(&ofdev->dev, dev);
2834
2835         /* There's a new kid in town ! Let's tell everybody */
2836         wake_up_all(&emac_probe_wait);
2837
2838
2839         printk(KERN_INFO
2840                "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2841                ndev->name, dev->cell_index, np->full_name,
2842                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2843                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2844
2845         if (dev->phy.address >= 0)
2846                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2847                        dev->phy.def->name, dev->phy.address);
2848
2849         emac_dbg_register(dev);
2850
2851         /* Life is good */
2852         return 0;
2853
2854         /* I have a bad feeling about this ... */
2855
2856  err_detach_tah:
2857         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2858                 tah_detach(dev->tah_dev, dev->tah_port);
2859  err_detach_rgmii:
2860         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2861                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2862  err_detach_zmii:
2863         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2864                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2865  err_unreg_commac:
2866         mal_unregister_commac(dev->mal, &dev->commac);
2867  err_rel_deps:
2868         emac_put_deps(dev);
2869  err_reg_unmap:
2870         iounmap(dev->emacp);
2871  err_irq_unmap:
2872         if (dev->wol_irq != NO_IRQ)
2873                 irq_dispose_mapping(dev->wol_irq);
2874         if (dev->emac_irq != NO_IRQ)
2875                 irq_dispose_mapping(dev->emac_irq);
2876  err_free:
2877         kfree(ndev);
2878  err_gone:
2879         /* if we were on the bootlist, remove us as we won't show up and
2880          * wake up all waiters to notify them in case they were waiting
2881          * on us
2882          */
2883         if (blist) {
2884                 *blist = NULL;
2885                 wake_up_all(&emac_probe_wait);
2886         }
2887         return err;
2888 }
2889
2890 static int __devexit emac_remove(struct of_device *ofdev)
2891 {
2892         struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2893
2894         DBG(dev, "remove" NL);
2895
2896         dev_set_drvdata(&ofdev->dev, NULL);
2897
2898         unregister_netdev(dev->ndev);
2899
2900         flush_scheduled_work();
2901
2902         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2903                 tah_detach(dev->tah_dev, dev->tah_port);
2904         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2905                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2906         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2907                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2908
2909         mal_unregister_commac(dev->mal, &dev->commac);
2910         emac_put_deps(dev);
2911
2912         emac_dbg_unregister(dev);
2913         iounmap(dev->emacp);
2914
2915         if (dev->wol_irq != NO_IRQ)
2916                 irq_dispose_mapping(dev->wol_irq);
2917         if (dev->emac_irq != NO_IRQ)
2918                 irq_dispose_mapping(dev->emac_irq);
2919
2920         kfree(dev->ndev);
2921
2922         return 0;
2923 }
2924
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: one entry per supported EMAC compatible
 * string.  Variant-specific behavior is selected later via the feature
 * bits set in emac_init_config(), not by which entry matched.
 */
static struct of_device_id emac_match[] =
{
	{
		.type           = "network",
		.compatible     = "ibm,emac",
	},
	{
		.type           = "network",
		.compatible     = "ibm,emac4",
	},
	{
		.type           = "network",
		.compatible     = "ibm,emac4sync",
	},
	{},
};
2942
/* of_platform glue: binds emac_probe()/emac_remove() to device-tree
 * nodes matching emac_match above. */
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
2950
2951 static void __init emac_make_bootlist(void)
2952 {
2953         struct device_node *np = NULL;
2954         int j, max, i = 0, k;
2955         int cell_indices[EMAC_BOOT_LIST_SIZE];
2956
2957         /* Collect EMACs */
2958         while((np = of_find_all_nodes(np)) != NULL) {
2959                 const u32 *idx;
2960
2961                 if (of_match_node(emac_match, np) == NULL)
2962                         continue;
2963                 if (of_get_property(np, "unused", NULL))
2964                         continue;
2965                 idx = of_get_property(np, "cell-index", NULL);
2966                 if (idx == NULL)
2967                         continue;
2968                 cell_indices[i] = *idx;
2969                 emac_boot_list[i++] = of_node_get(np);
2970                 if (i >= EMAC_BOOT_LIST_SIZE) {
2971                         of_node_put(np);
2972                         break;
2973                 }
2974         }
2975         max = i;
2976
2977         /* Bubble sort them (doh, what a creative algorithm :-) */
2978         for (i = 0; max > 1 && (i < (max - 1)); i++)
2979                 for (j = i; j < max; j++) {
2980                         if (cell_indices[i] > cell_indices[j]) {
2981                                 np = emac_boot_list[i];
2982                                 emac_boot_list[i] = emac_boot_list[j];
2983                                 emac_boot_list[j] = np;
2984                                 k = cell_indices[i];
2985                                 cell_indices[i] = cell_indices[j];
2986                                 cell_indices[j] = k;
2987                         }
2988                 }
2989 }
2990
2991 static int __init emac_init(void)
2992 {
2993         int rc;
2994
2995         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2996
2997         /* Init debug stuff */
2998         emac_init_debug();
2999
3000         /* Build EMAC boot list */
3001         emac_make_bootlist();
3002
3003         /* Init submodules */
3004         rc = mal_init();
3005         if (rc)
3006                 goto err;
3007         rc = zmii_init();
3008         if (rc)
3009                 goto err_mal;
3010         rc = rgmii_init();
3011         if (rc)
3012                 goto err_zmii;
3013         rc = tah_init();
3014         if (rc)
3015                 goto err_rgmii;
3016         rc = of_register_platform_driver(&emac_driver);
3017         if (rc)
3018                 goto err_tah;
3019
3020         return 0;
3021
3022  err_tah:
3023         tah_exit();
3024  err_rgmii:
3025         rgmii_exit();
3026  err_zmii:
3027         zmii_exit();
3028  err_mal:
3029         mal_exit();
3030  err:
3031         return rc;
3032 }
3033
3034 static void __exit emac_exit(void)
3035 {
3036         int i;
3037
3038         of_unregister_platform_driver(&emac_driver);
3039
3040         tah_exit();
3041         rgmii_exit();
3042         zmii_exit();
3043         mal_exit();
3044         emac_fini_debug();
3045
3046         /* Destroy EMAC boot list */
3047         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3048                 if (emac_boot_list[i])
3049                         of_node_put(emac_boot_list[i]);
3050 }
3051
/* Standard module entry/exit hooks */
module_init(emac_init);
module_exit(emac_exit);