1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, write to the Free Software Foundation, Inc.,   *
15  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16  *                                                                           *
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20  *                                                                           *
21  * http://www.chelsio.com                                                    *
22  *                                                                           *
23  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24  * All rights reserved.                                                      *
25  *                                                                           *
26  * Maintainers: maintainers@chelsio.com                                      *
27  *                                                                           *
28  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29  *          Tina Yang               <tainay@chelsio.com>                     *
30  *          Felix Marti             <felix@chelsio.com>                      *
31  *          Scott Bardone           <sbardone@chelsio.com>                   *
32  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33  *          Frank DiMambro          <frank@chelsio.com>                      *
34  *                                                                           *
35  * History:                                                                  *
36  *                                                                           *
37  ****************************************************************************/
38
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
50
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
59
60 #include <linux/workqueue.h>
61
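/*
 * Helpers to (re)arm and cancel the delayed work that refreshes the MAC
 * statistics counters before they can overflow.
 */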
62 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
63 {
64         schedule_delayed_work(&ap->stats_update_task, secs * HZ);
65 }
66
67 static inline void cancel_mac_stats_update(struct adapter *ap)
68 {
69         cancel_delayed_work(&ap->stats_update_task);
70 }
71
72 #define MAX_CMDQ_ENTRIES        16384
73 #define MAX_CMDQ1_ENTRIES       1024
74 #define MAX_RX_BUFFERS          16384
75 #define MAX_RX_JUMBO_BUFFERS    16384
76 #define MAX_TX_BUFFERS_HIGH     16384U
77 #define MAX_TX_BUFFERS_LOW      1536U
78 #define MAX_TX_BUFFERS          1460U
79 #define MIN_FL_ENTRIES          32
80
81 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
83                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
84
85 /*
86  * The EEPROM is actually bigger but only the first few bytes are used so we
87  * only report those.
88  */
89 #define EEPROM_SIZE 32
90
91 MODULE_DESCRIPTION(DRV_DESCRIPTION);
92 MODULE_AUTHOR("Chelsio Communications");
93 MODULE_LICENSE("GPL");
94
95 static int dflt_msg_enable = DFLT_MSG_ENABLE;
96
97 module_param(dflt_msg_enable, int, 0);
98 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
99
100 #define HCLOCK 0x0
101 #define LCLOCK 0x1
102
103 /* T1 cards powersave mode */
104 static int t1_clock(struct adapter *adapter, int mode);
105 static int t1powersave = 1;     /* HW default is powersave mode. */
106
107 module_param(t1powersave, int, 0);
108 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
109
110 static int disable_msi = 0;
111 module_param(disable_msi, int, 0);
112 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
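/*
 * Example (hypothetical) module load enabling verbose messages, forcing
 * legacy interrupts and disabling powersave, assuming the driver is built
 * as the "cxgb" module:
 *
 *   modprobe cxgb dflt_msg_enable=0x7fff disable_msi=1 t1powersave=0
 */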
113
114 static const char pci_speed[][4] = {
115         "33", "66", "100", "133"
116 };
117
118 /*
119  * Setup MAC to receive the types of packets we want.
120  */
121 static void t1_set_rxmode(struct net_device *dev)
122 {
123         struct adapter *adapter = dev->priv;
124         struct cmac *mac = adapter->port[dev->if_port].mac;
125         struct t1_rx_mode rm;
126
127         rm.dev = dev;
128         rm.idx = 0;
129         rm.list = dev->mc_list;
130         mac->ops->set_rx_mode(mac, &rm);
131 }
132
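/*
 * Log the carrier state of a port and, when the link is up, the negotiated
 * speed and duplex.
 */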
133 static void link_report(struct port_info *p)
134 {
135         if (!netif_carrier_ok(p->dev))
136                 printk(KERN_INFO "%s: link down\n", p->dev->name);
137         else {
138                 const char *s = "10Mbps";
139
140                 switch (p->link_config.speed) {
141                         case SPEED_10000: s = "10Gbps"; break;
142                         case SPEED_1000:  s = "1000Mbps"; break;
143                         case SPEED_100:   s = "100Mbps"; break;
144                 }
145
146                 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147                        p->dev->name, s,
148                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149         }
150 }
151
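/*
 * Called when a port's link is (re)negotiated.  If the reported state
 * differs from the current carrier state, update it, log the change and,
 * on multi-port adapters, retune the per-port TX scheduler to the new speed.
 */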
152 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
153                         int speed, int duplex, int pause)
154 {
155         struct port_info *p = &adapter->port[port_id];
156
157         if (link_stat != netif_carrier_ok(p->dev)) {
158                 if (link_stat)
159                         netif_carrier_on(p->dev);
160                 else
161                         netif_carrier_off(p->dev);
162                 link_report(p);
163
164                 /* multi-ports: inform toe */
165                 if ((speed > 0) && (adapter->params.nports > 1)) {
166                         unsigned int sched_speed = 10;
167                         switch (speed) {
168                         case SPEED_1000:
169                                 sched_speed = 1000;
170                                 break;
171                         case SPEED_100:
172                                 sched_speed = 100;
173                                 break;
174                         case SPEED_10:
175                                 sched_speed = 10;
176                                 break;
177                         }
178                         t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
179                 }
180         }
181 }
182
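/*
 * Bring up the MAC for a port: reset it, program the station address and
 * RX mode, restart link negotiation, and enable both traffic directions.
 */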
183 static void link_start(struct port_info *p)
184 {
185         struct cmac *mac = p->mac;
186
187         mac->ops->reset(mac);
188         if (mac->ops->macaddress_set)
189                 mac->ops->macaddress_set(mac, p->dev->dev_addr);
190         t1_set_rxmode(p->dev);
191         t1_link_start(p->phy, mac, &p->link_config);
192         mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
193 }
194
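/*
 * Enable TCP (and, where supported, UDP) checksum offload in the TP block,
 * plus IP header checksum offload when the adapter does TSO.
 */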
195 static void enable_hw_csum(struct adapter *adapter)
196 {
197         if (adapter->flags & TSO_CAPABLE)
198                 t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
199         if (adapter->flags & UDP_CSUM_CAPABLE)
200                 t1_tp_set_udp_checksum_offload(adapter->tp, 1);
201         t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
202 }
203
204 /*
205  * Things to do upon first use of a card.
206  * This must run with the rtnl lock held.
207  */
208 static int cxgb_up(struct adapter *adapter)
209 {
210         int err = 0;
211
212         if (!(adapter->flags & FULL_INIT_DONE)) {
213                 err = t1_init_hw_modules(adapter);
214                 if (err)
215                         goto out_err;
216
217                 enable_hw_csum(adapter);
218                 adapter->flags |= FULL_INIT_DONE;
219         }
220
221         t1_interrupts_clear(adapter);
222
223         adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
224         err = request_irq(adapter->pdev->irq, t1_interrupt,
225                           adapter->params.has_msi ? 0 : IRQF_SHARED,
226                           adapter->name, adapter);
227         if (err) {
228                 if (adapter->params.has_msi)
229                         pci_disable_msi(adapter->pdev);
230
231                 goto out_err;
232         }
233
234         t1_sge_start(adapter->sge);
235         t1_interrupts_enable(adapter);
236 out_err:
237         return err;
238 }
239
240 /*
241  * Release resources when all the ports have been stopped.
242  */
243 static void cxgb_down(struct adapter *adapter)
244 {
245         t1_sge_stop(adapter->sge);
246         t1_interrupts_disable(adapter);
247         free_irq(adapter->pdev->irq, adapter);
248         if (adapter->params.has_msi)
249                 pci_disable_msi(adapter->pdev);
250 }
251
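/*
 * net_device open handler: bring the adapter up on first open, enable NAPI,
 * mark the port active and start the periodic MAC statistics update if the
 * MAC needs it.
 */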
252 static int cxgb_open(struct net_device *dev)
253 {
254         int err;
255         struct adapter *adapter = dev->priv;
256         int other_ports = adapter->open_device_map & PORT_MASK;
257
258         napi_enable(&adapter->napi);
259         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
260                 napi_disable(&adapter->napi);
261                 return err;
262         }
263
264         __set_bit(dev->if_port, &adapter->open_device_map);
265         link_start(&adapter->port[dev->if_port]);
266         netif_start_queue(dev);
267         if (!other_ports && adapter->params.stats_update_period)
268                 schedule_mac_stats_update(adapter,
269                                           adapter->params.stats_update_period);
270         return 0;
271 }
272
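/*
 * net_device stop handler: quiesce the queue and NAPI, disable the MAC,
 * stop the statistics timer when the last port closes, and release the
 * adapter's resources once no ports remain open.
 */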
273 static int cxgb_close(struct net_device *dev)
274 {
275         struct adapter *adapter = dev->priv;
276         struct port_info *p = &adapter->port[dev->if_port];
277         struct cmac *mac = p->mac;
278
279         netif_stop_queue(dev);
280         napi_disable(&adapter->napi);
281         mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
282         netif_carrier_off(dev);
283
284         clear_bit(dev->if_port, &adapter->open_device_map);
285         if (adapter->params.stats_update_period &&
286             !(adapter->open_device_map & PORT_MASK)) {
287                 /* Stop statistics accumulation. */
288                 smp_mb__after_clear_bit();
289                 spin_lock(&adapter->work_lock);   /* sync with update task */
290                 spin_unlock(&adapter->work_lock);
291                 cancel_mac_stats_update(adapter);
292         }
293
294         if (!adapter->open_device_map)
295                 cxgb_down(adapter);
296         return 0;
297 }
298
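/*
 * Fill in the standard net_device statistics from a full MAC counter
 * update, mapping the hardware counter names onto the generic fields.
 */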
299 static struct net_device_stats *t1_get_stats(struct net_device *dev)
300 {
301         struct adapter *adapter = dev->priv;
302         struct port_info *p = &adapter->port[dev->if_port];
303         struct net_device_stats *ns = &p->netstats;
304         const struct cmac_statistics *pstats;
305
306         /* Do a full update of the MAC stats */
307         pstats = p->mac->ops->statistics_update(p->mac,
308                                                 MAC_STATS_UPDATE_FULL);
309
310         ns->tx_packets = pstats->TxUnicastFramesOK +
311                 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
312
313         ns->rx_packets = pstats->RxUnicastFramesOK +
314                 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
315
316         ns->tx_bytes = pstats->TxOctetsOK;
317         ns->rx_bytes = pstats->RxOctetsOK;
318
319         ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
320                 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
321         ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
322                 pstats->RxFCSErrors + pstats->RxAlignErrors +
323                 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
324                 pstats->RxSymbolErrors + pstats->RxRuntErrors;
325
326         ns->multicast  = pstats->RxMulticastFramesOK;
327         ns->collisions = pstats->TxTotalCollisions;
328
329         /* detailed rx_errors */
330         ns->rx_length_errors = pstats->RxFrameTooLongErrors +
331                 pstats->RxJabberErrors;
332         ns->rx_over_errors   = 0;
333         ns->rx_crc_errors    = pstats->RxFCSErrors;
334         ns->rx_frame_errors  = pstats->RxAlignErrors;
335         ns->rx_fifo_errors   = 0;
336         ns->rx_missed_errors = 0;
337
338         /* detailed tx_errors */
339         ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
340         ns->tx_carrier_errors   = 0;
341         ns->tx_fifo_errors      = pstats->TxUnderrun;
342         ns->tx_heartbeat_errors = 0;
343         ns->tx_window_errors    = pstats->TxLateCollisions;
344         return ns;
345 }
346
347 static u32 get_msglevel(struct net_device *dev)
348 {
349         struct adapter *adapter = dev->priv;
350
351         return adapter->msg_enable;
352 }
353
354 static void set_msglevel(struct net_device *dev, u32 val)
355 {
356         struct adapter *adapter = dev->priv;
357
358         adapter->msg_enable = val;
359 }
360
361 static char stats_strings[][ETH_GSTRING_LEN] = {
362         "TxOctetsOK",
363         "TxOctetsBad",
364         "TxUnicastFramesOK",
365         "TxMulticastFramesOK",
366         "TxBroadcastFramesOK",
367         "TxPauseFrames",
368         "TxFramesWithDeferredXmissions",
369         "TxLateCollisions",
370         "TxTotalCollisions",
371         "TxFramesAbortedDueToXSCollisions",
372         "TxUnderrun",
373         "TxLengthErrors",
374         "TxInternalMACXmitError",
375         "TxFramesWithExcessiveDeferral",
376         "TxFCSErrors",
377
378         "RxOctetsOK",
379         "RxOctetsBad",
380         "RxUnicastFramesOK",
381         "RxMulticastFramesOK",
382         "RxBroadcastFramesOK",
383         "RxPauseFrames",
384         "RxFCSErrors",
385         "RxAlignErrors",
386         "RxSymbolErrors",
387         "RxDataErrors",
388         "RxSequenceErrors",
389         "RxRuntErrors",
390         "RxJabberErrors",
391         "RxInternalMACRcvError",
392         "RxInRangeLengthErrors",
393         "RxOutOfRangeLengthField",
394         "RxFrameTooLongErrors",
395
396         /* Port stats */
397         "RxPackets",
398         "RxCsumGood",
399         "TxPackets",
400         "TxCsumOffload",
401         "TxTso",
402         "RxVlan",
403         "TxVlan",
404         "TxNeedHeadroom",
405
406         /* Interrupt stats */
407         "rx drops",
408         "pure_rsps",
409         "unhandled irqs",
410         "respQ_empty",
411         "respQ_overflow",
412         "freelistQ_empty",
413         "pkt_too_big",
414         "pkt_mismatch",
415         "cmdQ_full0",
416         "cmdQ_full1",
417
418         "espi_DIP2ParityErr",
419         "espi_DIP4Err",
420         "espi_RxDrops",
421         "espi_TxDrops",
422         "espi_RxOvfl",
423         "espi_ParityErr"
424 };
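/*
 * The strings above are reported in array order by "ethtool -S <iface>";
 * get_stats() below fills its u64 array in the same order.
 */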
425
426 #define T2_REGMAP_SIZE (3 * 1024)
427
428 static int get_regs_len(struct net_device *dev)
429 {
430         return T2_REGMAP_SIZE;
431 }
432
433 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
434 {
435         struct adapter *adapter = dev->priv;
436
437         strcpy(info->driver, DRV_NAME);
438         strcpy(info->version, DRV_VERSION);
439         strcpy(info->fw_version, "N/A");
440         strcpy(info->bus_info, pci_name(adapter->pdev));
441 }
442
443 static int get_sset_count(struct net_device *dev, int sset)
444 {
445         switch (sset) {
446         case ETH_SS_STATS:
447                 return ARRAY_SIZE(stats_strings);
448         default:
449                 return -EOPNOTSUPP;
450         }
451 }
452
453 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
454 {
455         if (stringset == ETH_SS_STATS)
456                 memcpy(data, stats_strings, sizeof(stats_strings));
457 }
458
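/*
 * ethtool statistics handler: dump the MAC counters, the per-port SGE
 * stats, the SGE interrupt counters and, if present, the ESPI interrupt
 * counters into one flat u64 array.
 */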
459 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
460                       u64 *data)
461 {
462         struct adapter *adapter = dev->priv;
463         struct cmac *mac = adapter->port[dev->if_port].mac;
464         const struct cmac_statistics *s;
465         const struct sge_intr_counts *t;
466         struct sge_port_stats ss;
467         unsigned int len;
468
469         s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
470
471         len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
472         memcpy(data, &s->TxOctetsOK, len);
473         data += len;
474
475         len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
476         memcpy(data, &s->RxOctetsOK, len);
477         data += len;
478
479         t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
480         memcpy(data, &ss, sizeof(ss));
481         data += sizeof(ss);
482
483         t = t1_sge_get_intr_counts(adapter->sge);
484         *data++ = t->rx_drops;
485         *data++ = t->pure_rsps;
486         *data++ = t->unhandled_irqs;
487         *data++ = t->respQ_empty;
488         *data++ = t->respQ_overflow;
489         *data++ = t->freelistQ_empty;
490         *data++ = t->pkt_too_big;
491         *data++ = t->pkt_mismatch;
492         *data++ = t->cmdQ_full[0];
493         *data++ = t->cmdQ_full[1];
494
495         if (adapter->espi) {
496                 const struct espi_intr_counts *e;
497
498                 e = t1_espi_get_intr_counts(adapter->espi);
499                 *data++ = e->DIP2_parity_err;
500                 *data++ = e->DIP4_err;
501                 *data++ = e->rx_drops;
502                 *data++ = e->tx_drops;
503                 *data++ = e->rx_ovflw;
504                 *data++ = e->parity_err;
505         }
506 }
507
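/*
 * Copy the contiguous range of 32-bit registers [start, end] into the
 * register dump buffer at the offset matching the register address.
 */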
508 static inline void reg_block_dump(struct adapter *ap, void *buf,
509                                   unsigned int start, unsigned int end)
510 {
511         u32 *p = buf + start;
512
513         for ( ; start <= end; start += sizeof(u32))
514                 *p++ = readl(ap->regs + start);
515 }
516
517 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
518                      void *buf)
519 {
520         struct adapter *ap = dev->priv;
521
522         /*
523          * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
524          */
525         regs->version = 2;
526
527         memset(buf, 0, T2_REGMAP_SIZE);
528         reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
529         reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
530         reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
531         reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
532         reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
533         reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
534         reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
535         reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
536         reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
537         reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
538 }
539
540 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
541 {
542         struct adapter *adapter = dev->priv;
543         struct port_info *p = &adapter->port[dev->if_port];
544
545         cmd->supported = p->link_config.supported;
546         cmd->advertising = p->link_config.advertising;
547
548         if (netif_carrier_ok(dev)) {
549                 cmd->speed = p->link_config.speed;
550                 cmd->duplex = p->link_config.duplex;
551         } else {
552                 cmd->speed = -1;
553                 cmd->duplex = -1;
554         }
555
556         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
557         cmd->phy_address = p->phy->addr;
558         cmd->transceiver = XCVR_EXTERNAL;
559         cmd->autoneg = p->link_config.autoneg;
560         cmd->maxtxpkt = 0;
561         cmd->maxrxpkt = 0;
562         return 0;
563 }
564
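/*
 * Translate an ethtool speed/duplex pair into the corresponding
 * SUPPORTED_* capability bit.
 */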
565 static int speed_duplex_to_caps(int speed, int duplex)
566 {
567         int cap = 0;
568
569         switch (speed) {
570         case SPEED_10:
571                 if (duplex == DUPLEX_FULL)
572                         cap = SUPPORTED_10baseT_Full;
573                 else
574                         cap = SUPPORTED_10baseT_Half;
575                 break;
576         case SPEED_100:
577                 if (duplex == DUPLEX_FULL)
578                         cap = SUPPORTED_100baseT_Full;
579                 else
580                         cap = SUPPORTED_100baseT_Half;
581                 break;
582         case SPEED_1000:
583                 if (duplex == DUPLEX_FULL)
584                         cap = SUPPORTED_1000baseT_Full;
585                 else
586                         cap = SUPPORTED_1000baseT_Half;
587                 break;
588         case SPEED_10000:
589                 if (duplex == DUPLEX_FULL)
590                         cap = SUPPORTED_10000baseT_Full;
591         }
592         return cap;
593 }
594
595 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
596                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
597                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
598                       ADVERTISED_10000baseT_Full)
599
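/*
 * ethtool set_settings handler: reject requests the port cannot satisfy
 * (forced 1000Mbps is not allowed), record the new speed/duplex or
 * advertising mask, and restart the link if the interface is running.
 */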
600 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
601 {
602         struct adapter *adapter = dev->priv;
603         struct port_info *p = &adapter->port[dev->if_port];
604         struct link_config *lc = &p->link_config;
605
606         if (!(lc->supported & SUPPORTED_Autoneg))
607                 return -EOPNOTSUPP;             /* can't change speed/duplex */
608
609         if (cmd->autoneg == AUTONEG_DISABLE) {
610                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
611
612                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
613                         return -EINVAL;
614                 lc->requested_speed = cmd->speed;
615                 lc->requested_duplex = cmd->duplex;
616                 lc->advertising = 0;
617         } else {
618                 cmd->advertising &= ADVERTISED_MASK;
619                 if (cmd->advertising & (cmd->advertising - 1))
620                         cmd->advertising = lc->supported;
621                 cmd->advertising &= lc->supported;
622                 if (!cmd->advertising)
623                         return -EINVAL;
624                 lc->requested_speed = SPEED_INVALID;
625                 lc->requested_duplex = DUPLEX_INVALID;
626                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
627         }
628         lc->autoneg = cmd->autoneg;
629         if (netif_running(dev))
630                 t1_link_start(p->phy, p->mac, lc);
631         return 0;
632 }
633
634 static void get_pauseparam(struct net_device *dev,
635                            struct ethtool_pauseparam *epause)
636 {
637         struct adapter *adapter = dev->priv;
638         struct port_info *p = &adapter->port[dev->if_port];
639
640         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
641         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
642         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
643 }
644
645 static int set_pauseparam(struct net_device *dev,
646                           struct ethtool_pauseparam *epause)
647 {
648         struct adapter *adapter = dev->priv;
649         struct port_info *p = &adapter->port[dev->if_port];
650         struct link_config *lc = &p->link_config;
651
652         if (epause->autoneg == AUTONEG_DISABLE)
653                 lc->requested_fc = 0;
654         else if (lc->supported & SUPPORTED_Autoneg)
655                 lc->requested_fc = PAUSE_AUTONEG;
656         else
657                 return -EINVAL;
658
659         if (epause->rx_pause)
660                 lc->requested_fc |= PAUSE_RX;
661         if (epause->tx_pause)
662                 lc->requested_fc |= PAUSE_TX;
663         if (lc->autoneg == AUTONEG_ENABLE) {
664                 if (netif_running(dev))
665                         t1_link_start(p->phy, p->mac, lc);
666         } else {
667                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
668                 if (netif_running(dev))
669                         p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
670                                                          lc->fc);
671         }
672         return 0;
673 }
674
675 static u32 get_rx_csum(struct net_device *dev)
676 {
677         struct adapter *adapter = dev->priv;
678
679         return (adapter->flags & RX_CSUM_ENABLED) != 0;
680 }
681
682 static int set_rx_csum(struct net_device *dev, u32 data)
683 {
684         struct adapter *adapter = dev->priv;
685
686         if (data)
687                 adapter->flags |= RX_CSUM_ENABLED;
688         else
689                 adapter->flags &= ~RX_CSUM_ENABLED;
690         return 0;
691 }
692
693 static int set_tso(struct net_device *dev, u32 value)
694 {
695         struct adapter *adapter = dev->priv;
696
697         if (!(adapter->flags & TSO_CAPABLE))
698                 return value ? -EOPNOTSUPP : 0;
699         return ethtool_op_set_tso(dev, value);
700 }
701
702 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
703 {
704         struct adapter *adapter = dev->priv;
705         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
706
707         e->rx_max_pending = MAX_RX_BUFFERS;
708         e->rx_mini_max_pending = 0;
709         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
710         e->tx_max_pending = MAX_CMDQ_ENTRIES;
711
712         e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
713         e->rx_mini_pending = 0;
714         e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
715         e->tx_pending = adapter->params.sge.cmdQ_size[0];
716 }
717
718 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
719 {
720         struct adapter *adapter = dev->priv;
721         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
722
723         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
724             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
725             e->tx_pending > MAX_CMDQ_ENTRIES ||
726             e->rx_pending < MIN_FL_ENTRIES ||
727             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
728             e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
729                 return -EINVAL;
730
731         if (adapter->flags & FULL_INIT_DONE)
732                 return -EBUSY;
733
734         adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
735         adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
736         adapter->params.sge.cmdQ_size[0] = e->tx_pending;
737         adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
738                 MAX_CMDQ1_ENTRIES : e->tx_pending;
739         return 0;
740 }
741
742 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
743 {
744         struct adapter *adapter = dev->priv;
745
746         adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
747         adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
748         adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
749         t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
750         return 0;
751 }
752
753 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
754 {
755         struct adapter *adapter = dev->priv;
756
757         c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
758         c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
759         c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
760         return 0;
761 }
762
763 static int get_eeprom_len(struct net_device *dev)
764 {
765         struct adapter *adapter = dev->priv;
766
767         return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
768 }
769
770 #define EEPROM_MAGIC(ap) \
771         (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
772
773 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
774                       u8 *data)
775 {
776         int i;
777         u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
778         struct adapter *adapter = dev->priv;
779
780         e->magic = EEPROM_MAGIC(adapter);
781         for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
782                 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
783         memcpy(data, buf + e->offset, e->len);
784         return 0;
785 }
786
787 static const struct ethtool_ops t1_ethtool_ops = {
788         .get_settings      = get_settings,
789         .set_settings      = set_settings,
790         .get_drvinfo       = get_drvinfo,
791         .get_msglevel      = get_msglevel,
792         .set_msglevel      = set_msglevel,
793         .get_ringparam     = get_sge_param,
794         .set_ringparam     = set_sge_param,
795         .get_coalesce      = get_coalesce,
796         .set_coalesce      = set_coalesce,
797         .get_eeprom_len    = get_eeprom_len,
798         .get_eeprom        = get_eeprom,
799         .get_pauseparam    = get_pauseparam,
800         .set_pauseparam    = set_pauseparam,
801         .get_rx_csum       = get_rx_csum,
802         .set_rx_csum       = set_rx_csum,
803         .set_tx_csum       = ethtool_op_set_tx_csum,
804         .set_sg            = ethtool_op_set_sg,
805         .get_link          = ethtool_op_get_link,
806         .get_strings       = get_strings,
807         .get_sset_count    = get_sset_count,
808         .get_ethtool_stats = get_stats,
809         .get_regs_len      = get_regs_len,
810         .get_regs          = get_regs,
811         .set_tso           = set_tso,
812 };
813
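/*
 * ioctl handler: only the MII register access ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) are supported.
 */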
814 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
815 {
816         struct adapter *adapter = dev->priv;
817         struct mii_ioctl_data *data = if_mii(req);
818
819         switch (cmd) {
820         case SIOCGMIIPHY:
821                 data->phy_id = adapter->port[dev->if_port].phy->addr;
822                 /* FALLTHRU */
823         case SIOCGMIIREG: {
824                 struct cphy *phy = adapter->port[dev->if_port].phy;
825                 u32 val;
826
827                 if (!phy->mdio_read)
828                         return -EOPNOTSUPP;
829                 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
830                                &val);
831                 data->val_out = val;
832                 break;
833         }
834         case SIOCSMIIREG: {
835                 struct cphy *phy = adapter->port[dev->if_port].phy;
836
837                 if (!capable(CAP_NET_ADMIN))
838                         return -EPERM;
839                 if (!phy->mdio_write)
840                         return -EOPNOTSUPP;
841                 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
842                                 data->val_in);
843                 break;
844         }
845
846         default:
847                 return -EOPNOTSUPP;
848         }
849         return 0;
850 }
851
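/*
 * Change the port MTU.  The MAC is updated first; dev->mtu only changes
 * if the hardware accepts the new size (at least 68 bytes).
 */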
852 static int t1_change_mtu(struct net_device *dev, int new_mtu)
853 {
854         int ret;
855         struct adapter *adapter = dev->priv;
856         struct cmac *mac = adapter->port[dev->if_port].mac;
857
858         if (!mac->ops->set_mtu)
859                 return -EOPNOTSUPP;
860         if (new_mtu < 68)
861                 return -EINVAL;
862         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
863                 return ret;
864         dev->mtu = new_mtu;
865         return 0;
866 }
867
868 static int t1_set_mac_addr(struct net_device *dev, void *p)
869 {
870         struct adapter *adapter = dev->priv;
871         struct cmac *mac = adapter->port[dev->if_port].mac;
872         struct sockaddr *addr = p;
873
874         if (!mac->ops->macaddress_set)
875                 return -EOPNOTSUPP;
876
877         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
878         mac->ops->macaddress_set(mac, dev->dev_addr);
879         return 0;
880 }
881
882 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
883 static void vlan_rx_register(struct net_device *dev,
884                                    struct vlan_group *grp)
885 {
886         struct adapter *adapter = dev->priv;
887
888         spin_lock_irq(&adapter->async_lock);
889         adapter->vlan_grp = grp;
890         t1_set_vlan_accel(adapter, grp != NULL);
891         spin_unlock_irq(&adapter->async_lock);
892 }
893 #endif
894
895 #ifdef CONFIG_NET_POLL_CONTROLLER
896 static void t1_netpoll(struct net_device *dev)
897 {
898         unsigned long flags;
899         struct adapter *adapter = dev->priv;
900
901         local_irq_save(flags);
902         t1_interrupt(adapter->pdev->irq, adapter);
903         local_irq_restore(flags);
904 }
905 #endif
906
907 /*
908  * Periodic accumulation of MAC statistics.  This is used only if the MAC
909  * does not have any other way to prevent stats counter overflow.
910  */
911 static void mac_stats_task(struct work_struct *work)
912 {
913         int i;
914         struct adapter *adapter =
915                 container_of(work, struct adapter, stats_update_task.work);
916
917         for_each_port(adapter, i) {
918                 struct port_info *p = &adapter->port[i];
919
920                 if (netif_running(p->dev))
921                         p->mac->ops->statistics_update(p->mac,
922                                                        MAC_STATS_UPDATE_FAST);
923         }
924
925         /* Schedule the next statistics update if any port is active. */
926         spin_lock(&adapter->work_lock);
927         if (adapter->open_device_map & PORT_MASK)
928                 schedule_mac_stats_update(adapter,
929                                           adapter->params.stats_update_period);
930         spin_unlock(&adapter->work_lock);
931 }
932
933 /*
934  * Processes elmer0 external interrupts in process context.
935  */
936 static void ext_intr_task(struct work_struct *work)
937 {
938         struct adapter *adapter =
939                 container_of(work, struct adapter, ext_intr_handler_task);
940
941         t1_elmer0_ext_intr_handler(adapter);
942
943         /* Now reenable external interrupts */
944         spin_lock_irq(&adapter->async_lock);
945         adapter->slow_intr_mask |= F_PL_INTR_EXT;
946         writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
947         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
948                    adapter->regs + A_PL_ENABLE);
949         spin_unlock_irq(&adapter->async_lock);
950 }
951
952 /*
953  * Interrupt-context handler for elmer0 external interrupts.
954  */
955 void t1_elmer0_ext_intr(struct adapter *adapter)
956 {
957         /*
958          * Schedule a task to handle external interrupts as we require
959          * a process context.  We disable EXT interrupts in the interim
960          * and let the task reenable them when it's done.
961          */
962         adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
963         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
964                    adapter->regs + A_PL_ENABLE);
965         schedule_work(&adapter->ext_intr_handler_task);
966 }
967
968 void t1_fatal_err(struct adapter *adapter)
969 {
970         if (adapter->flags & FULL_INIT_DONE) {
971                 t1_sge_stop(adapter->sge);
972                 t1_interrupts_disable(adapter);
973         }
974         CH_ALERT("%s: encountered fatal error, operation suspended\n",
975                  adapter->name);
976 }
977
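/*
 * PCI probe handler: map the register BAR, allocate one net_device per
 * port (all sharing a single adapter structure), set up the offload
 * features and register the interfaces.
 */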
978 static int __devinit init_one(struct pci_dev *pdev,
979                               const struct pci_device_id *ent)
980 {
981         static int version_printed;
982
983         int i, err, pci_using_dac = 0;
984         unsigned long mmio_start, mmio_len;
985         const struct board_info *bi;
986         struct adapter *adapter = NULL;
987         struct port_info *pi;
988
989         if (!version_printed) {
990                 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
991                        DRV_VERSION);
992                 ++version_printed;
993         }
994
995         err = pci_enable_device(pdev);
996         if (err)
997                 return err;
998
999         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1000                 CH_ERR("%s: cannot find PCI device memory base address\n",
1001                        pci_name(pdev));
1002                 err = -ENODEV;
1003                 goto out_disable_pdev;
1004         }
1005
1006         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1007                 pci_using_dac = 1;
1008
1009                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1010                         CH_ERR("%s: unable to obtain 64-bit DMA for "
1011                                "consistent allocations\n", pci_name(pdev));
1012                         err = -ENODEV;
1013                         goto out_disable_pdev;
1014                 }
1015
1016         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1017                 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1018                 goto out_disable_pdev;
1019         }
1020
1021         err = pci_request_regions(pdev, DRV_NAME);
1022         if (err) {
1023                 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1024                 goto out_disable_pdev;
1025         }
1026
1027         pci_set_master(pdev);
1028
1029         mmio_start = pci_resource_start(pdev, 0);
1030         mmio_len = pci_resource_len(pdev, 0);
1031         bi = t1_get_board_info(ent->driver_data);
1032
1033         for (i = 0; i < bi->port_number; ++i) {
1034                 struct net_device *netdev;
1035
1036                 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1037                 if (!netdev) {
1038                         err = -ENOMEM;
1039                         goto out_free_dev;
1040                 }
1041
1042                 SET_NETDEV_DEV(netdev, &pdev->dev);
1043
1044                 if (!adapter) {
1045                         adapter = netdev->priv;
1046                         adapter->pdev = pdev;
1047                         adapter->port[0].dev = netdev;  /* so we don't leak it */
1048
1049                         adapter->regs = ioremap(mmio_start, mmio_len);
1050                         if (!adapter->regs) {
1051                                 CH_ERR("%s: cannot map device registers\n",
1052                                        pci_name(pdev));
1053                                 err = -ENOMEM;
1054                                 goto out_free_dev;
1055                         }
1056
1057                         if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1058                                 err = -ENODEV;    /* Can't handle this chip rev */
1059                                 goto out_free_dev;
1060                         }
1061
1062                         adapter->name = pci_name(pdev);
1063                         adapter->msg_enable = dflt_msg_enable;
1064                         adapter->mmio_len = mmio_len;
1065
1066                         spin_lock_init(&adapter->tpi_lock);
1067                         spin_lock_init(&adapter->work_lock);
1068                         spin_lock_init(&adapter->async_lock);
1069                         spin_lock_init(&adapter->mac_lock);
1070
1071                         INIT_WORK(&adapter->ext_intr_handler_task,
1072                                   ext_intr_task);
1073                         INIT_DELAYED_WORK(&adapter->stats_update_task,
1074                                           mac_stats_task);
1075
1076                         pci_set_drvdata(pdev, netdev);
1077                 }
1078
1079                 pi = &adapter->port[i];
1080                 pi->dev = netdev;
1081                 netif_carrier_off(netdev);
1082                 netdev->irq = pdev->irq;
1083                 netdev->if_port = i;
1084                 netdev->mem_start = mmio_start;
1085                 netdev->mem_end = mmio_start + mmio_len - 1;
1086                 netdev->priv = adapter;
1087                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1088                 netdev->features |= NETIF_F_LLTX;
1089
1090                 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1091                 if (pci_using_dac)
1092                         netdev->features |= NETIF_F_HIGHDMA;
1093                 if (vlan_tso_capable(adapter)) {
1094 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1095                         adapter->flags |= VLAN_ACCEL_CAPABLE;
1096                         netdev->features |=
1097                                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1098                         netdev->vlan_rx_register = vlan_rx_register;
1099 #endif
1100
1101                         /* T204: disable TSO */
1102                         if (!(is_T2(adapter)) || bi->port_number != 4) {
1103                                 adapter->flags |= TSO_CAPABLE;
1104                                 netdev->features |= NETIF_F_TSO;
1105                         }
1106                 }
1107
1108                 netdev->open = cxgb_open;
1109                 netdev->stop = cxgb_close;
1110                 netdev->hard_start_xmit = t1_start_xmit;
1111                 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1112                         sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1113                 netdev->get_stats = t1_get_stats;
1114                 netdev->set_multicast_list = t1_set_rxmode;
1115                 netdev->do_ioctl = t1_ioctl;
1116                 netdev->change_mtu = t1_change_mtu;
1117                 netdev->set_mac_address = t1_set_mac_addr;
1118 #ifdef CONFIG_NET_POLL_CONTROLLER
1119                 netdev->poll_controller = t1_netpoll;
1120 #endif
1121 #ifdef CONFIG_CHELSIO_T1_NAPI
1122                 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1123 #endif
1124
1125                 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1126         }
1127
1128         if (t1_init_sw_modules(adapter, bi) < 0) {
1129                 err = -ENODEV;
1130                 goto out_free_dev;
1131         }
1132
1133         /*
1134          * The card is now ready to go.  If any errors occur during device
1135          * registration we do not fail the whole card but rather proceed only
1136          * with the ports we manage to register successfully.  However we must
1137          * register at least one net device.
1138          */
1139         for (i = 0; i < bi->port_number; ++i) {
1140                 err = register_netdev(adapter->port[i].dev);
1141                 if (err)
1142                         CH_WARN("%s: cannot register net device %s, skipping\n",
1143                                 pci_name(pdev), adapter->port[i].dev->name);
1144                 else {
1145                         /*
1146                          * Change the name we use for messages to the name of
1147                          * the first successfully registered interface.
1148                          */
1149                         if (!adapter->registered_device_map)
1150                                 adapter->name = adapter->port[i].dev->name;
1151
1152                         __set_bit(i, &adapter->registered_device_map);
1153                 }
1154         }
1155         if (!adapter->registered_device_map) {
1156                 CH_ERR("%s: could not register any net devices\n",
1157                        pci_name(pdev));
1158                 goto out_release_adapter_res;
1159         }
1160
1161         printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1162                bi->desc, adapter->params.chip_revision,
1163                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1164                adapter->params.pci.speed, adapter->params.pci.width);
1165
1166         /*
1167          * Set the T1B ASIC and memory clocks.
1168          */
1169         if (t1powersave)
1170                 adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
1171         else
1172                 adapter->t1powersave = HCLOCK;
1173         if (t1_is_T1B(adapter))
1174                 t1_clock(adapter, t1powersave);
1175
1176         return 0;
1177
1178 out_release_adapter_res:
1179         t1_free_sw_modules(adapter);
1180 out_free_dev:
1181         if (adapter) {
1182                 if (adapter->regs)
1183                         iounmap(adapter->regs);
1184                 for (i = bi->port_number - 1; i >= 0; --i)
1185                         if (adapter->port[i].dev)
1186                                 free_netdev(adapter->port[i].dev);
1187         }
1188         pci_release_regions(pdev);
1189 out_disable_pdev:
1190         pci_disable_device(pdev);
1191         pci_set_drvdata(pdev, NULL);
1192         return err;
1193 }
1194
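/*
 * Shift 'nbits' of 'bitdata' out MSB first on the ELMER0 GPIO data line,
 * pulsing the serial clock line for each bit.
 */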
1195 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1196 {
1197         int data;
1198         int i;
1199         u32 val;
1200
1201         enum {
1202                 S_CLOCK = 1 << 3,
1203                 S_DATA = 1 << 4
1204         };
1205
1206         for (i = (nbits - 1); i > -1; i--) {
1207
1208                 udelay(50);
1209
1210                 data = ((bitdata >> i) & 0x1);
1211                 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1212
1213                 if (data)
1214                         val |= S_DATA;
1215                 else
1216                         val &= ~S_DATA;
1217
1218                 udelay(50);
1219
1220                 /* Set SCLOCK low */
1221                 val &= ~S_CLOCK;
1222                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1223
1224                 udelay(50);
1225
1226                 /* Write SCLOCK high */
1227                 val |= S_CLOCK;
1228                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1229
1230         }
1231 }
1232
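/*
 * Switch a T1B between full-speed and power-save operation by bit-banging
 * new M/N/T values into the core and memory clock synthesizers.
 */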
1233 static int t1_clock(struct adapter *adapter, int mode)
1234 {
1235         u32 val;
1236         int M_CORE_VAL;
1237         int M_MEM_VAL;
1238
1239         enum {
1240                 M_CORE_BITS     = 9,
1241                 T_CORE_VAL      = 0,
1242                 T_CORE_BITS     = 2,
1243                 N_CORE_VAL      = 0,
1244                 N_CORE_BITS     = 2,
1245                 M_MEM_BITS      = 9,
1246                 T_MEM_VAL       = 0,
1247                 T_MEM_BITS      = 2,
1248                 N_MEM_VAL       = 0,
1249                 N_MEM_BITS      = 2,
1250                 NP_LOAD         = 1 << 17,
1251                 S_LOAD_MEM      = 1 << 5,
1252                 S_LOAD_CORE     = 1 << 6,
1253                 S_CLOCK         = 1 << 3
1254         };
1255
1256         if (!t1_is_T1B(adapter))
1257                 return -ENODEV; /* Can't re-clock this chip. */
1258
1259         if (mode & 2)
1260                 return 0;       /* show current mode. */
1261
1262         if ((adapter->t1powersave & 1) == (mode & 1))
1263                 return -EALREADY;       /* ASIC already running in mode. */
1264
1265         if ((mode & 1) == HCLOCK) {
1266                 M_CORE_VAL = 0x14;
1267                 M_MEM_VAL = 0x18;
1268                 adapter->t1powersave = HCLOCK;  /* overclock */
1269         } else {
1270                 M_CORE_VAL = 0xe;
1271                 M_MEM_VAL = 0x10;
1272                 adapter->t1powersave = LCLOCK;  /* underclock */
1273         }
1274
1275         /* Don't interrupt this serial stream! */
1276         spin_lock(&adapter->tpi_lock);
1277
1278         /* Initialize for ASIC core */
1279         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1280         val |= NP_LOAD;
1281         udelay(50);
1282         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1283         udelay(50);
1284         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1285         val &= ~S_LOAD_CORE;
1286         val &= ~S_CLOCK;
1287         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1288         udelay(50);
1289
1290         /* Serial program the ASIC clock synthesizer */
1291         bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
1292         bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
1293         bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
1294         udelay(50);
1295
1296         /* Finish ASIC core */
1297         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1298         val |= S_LOAD_CORE;
1299         udelay(50);
1300         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1301         udelay(50);
1302         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1303         val &= ~S_LOAD_CORE;
1304         udelay(50);
1305         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1306         udelay(50);
1307
1308         /* Initialize for memory */
1309         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1310         val |= NP_LOAD;
1311         udelay(50);
1312         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1313         udelay(50);
1314         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1315         val &= ~S_LOAD_MEM;
1316         val &= ~S_CLOCK;
1317         udelay(50);
1318         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1319         udelay(50);
1320
1321         /* Serial program the memory clock synthesizer */
1322         bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
1323         bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
1324         bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
1325         udelay(50);
1326
1327         /* Finish memory */
1328         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1329         val |= S_LOAD_MEM;
1330         udelay(50);
1331         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1332         udelay(50);
1333         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1334         val &= ~S_LOAD_MEM;
1335         udelay(50);
1336         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1337
1338         spin_unlock(&adapter->tpi_lock);
1339
1340         return 0;
1341 }
1342
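/*
 * Software-reset the chip by cycling it through PCI power state D3hot and
 * back to D0 via the power management control/status register.
 */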
1343 static inline void t1_sw_reset(struct pci_dev *pdev)
1344 {
1345         pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1346         pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1347 }
1348
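/*
 * PCI remove handler: unregister the ports, free the software state,
 * unmap the registers and leave the chip in a reset state.
 */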
1349 static void __devexit remove_one(struct pci_dev *pdev)
1350 {
1351         struct net_device *dev = pci_get_drvdata(pdev);
1352         struct adapter *adapter = dev->priv;
1353         int i;
1354
1355         for_each_port(adapter, i) {
1356                 if (test_bit(i, &adapter->registered_device_map))
1357                         unregister_netdev(adapter->port[i].dev);
1358         }
1359
1360         t1_free_sw_modules(adapter);
1361         iounmap(adapter->regs);
1362
1363         while (--i >= 0) {
1364                 if (adapter->port[i].dev)
1365                         free_netdev(adapter->port[i].dev);
1366         }
1367
1368         pci_release_regions(pdev);
1369         pci_disable_device(pdev);
1370         pci_set_drvdata(pdev, NULL);
1371         t1_sw_reset(pdev);
1372 }
1373
1374 static struct pci_driver driver = {
1375         .name     = DRV_NAME,
1376         .id_table = t1_pci_tbl,
1377         .probe    = init_one,
1378         .remove   = __devexit_p(remove_one),
1379 };
1380
1381 static int __init t1_init_module(void)
1382 {
1383         return pci_register_driver(&driver);
1384 }
1385
1386 static void __exit t1_cleanup_module(void)
1387 {
1388         pci_unregister_driver(&driver);
1389 }
1390
1391 module_init(t1_init_module);
1392 module_exit(t1_cleanup_module);