/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
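/*
 * Example (illustrative only): loading the module with "modprobe cxgb3
 * msi=1" restricts the driver to MSI or legacy pin interrupts, e.g., on a
 * platform with broken MSI-X support.
 */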

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

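/*
 * Handle a link fault notification: update the carrier state, clear any
 * latched local faults and re-enable the MAC Tx path when the link comes
 * back (state != 0), and log the new link state.
 */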
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else
                netif_carrier_off(dev);

        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter whose PHY reported the module change
 *      @port_id: the port index of the changed PHY
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

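/*
 * Push the interface's current unicast/multicast/promiscuous settings down
 * to the MAC's Rx filter.
 */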
static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

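/*
 * Disable MSI-X or MSI, whichever the adapter is currently using, and clear
 * the corresponding capability flag.
 */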
static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

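/*
 * Request one MSI-X vector per SGE queue set, in queue-set order across all
 * ports.  Vector 0 is reserved for the async (slow path) interrupt, so data
 * vectors start at index 1.  On failure, the vectors already requested are
 * freed and the error is returned.
 */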
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

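/*
 * Release all IRQs held by the adapter: the async vector plus one vector
 * per queue set under MSI-X, or the single PCI IRQ otherwise.
 */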
static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

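/*
 * Poll (up to 5 attempts, 10ms apart) until response queue 0 has absorbed
 * n offload packets beyond init_cnt, i.e., until the replies to the
 * management CPLs sent below have arrived.
 */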
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

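/*
 * Write benign entries to every SMT, L2T, and routing-table location plus
 * one TCB field, then wait for all replies.  The intent appears to be to
 * initialize the parity bits of these TP memories so that later parity
 * errors reflect real corruption rather than uninitialized state.
 */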
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

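/*
 * Register a NAPI instance for every queue set that has been allocated
 * (qs->adap is set), using the poll handler already installed in the qset.
 */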
static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set, updating both the
 *      driver's queue-set parameters and the live queue state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

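/*
 * sysfs helpers: attr_show/attr_store wrap per-attribute format/set
 * callbacks with the rtnl lock so they are serialized against ioctls that
 * may reconfigure or shut down the device.
 */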
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

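/*
 * Traffic-management sysfs attributes: expose the eight Tx scheduler rate
 * limiters (sched0..sched7).  Reads decode the bytes-per-tick and
 * clocks-per-tick pair from TP TM PIO space into Kbps; writes reprogram the
 * scheduler via t3_config_sched().
 */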
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

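/*
 * Program one source MAC table (SMT) entry with the MAC address of the
 * corresponding port, via a CPL_SMT_WRITE_REQ sent through the offload
 * queue.
 */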
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

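/*
 * Send a firmware PKTSCHED_SET management work request that binds queue
 * qidx to packet scheduler 'sched' with the given min/max bounds and port
 * binding.
 */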
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);

        return ret;
}

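/*
 * Bind every queue set to its port.  Iteration continues past individual
 * failures; the last error seen, if any, is returned.
 */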
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

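/*
 * Fetch the firmware image matching the driver's expected version from
 * userspace via request_firmware() and load it into the adapter.
 */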
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

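/*
 * Map the chip revision to the character used in TP SRAM image file names
 * (e.g., 'b' for T3B parts).  A return of 0 makes update_tpsram() skip
 * loading an image.
 */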
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

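/*
 * Load the protocol-engine (TP) SRAM image appropriate for this chip
 * revision from userspace, validate it, and program it into the adapter.
 */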
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d, "
                        "loading protocol SRAM failed\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

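/*
 * Arm the periodic adapter check task on the driver's private workqueue,
 * using the link-poll period when link polling is enabled and the stats
 * update period otherwise.
 */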
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

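/*
 * Bring up the offload side of the adapter: enable offload mode in the
 * hardware, activate the offload module, program MTUs and the SMT, and
 * notify registered ULP clients.  Safe to call repeatedly; only the first
 * caller does the work.
 */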
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

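/*
 * ndo_open handler: bring up the adapter on first use, mark the port open,
 * optionally initialize offload, and start the port's link and Tx queues.
 */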
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;
}

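/* Copy registers [start, end] into buf at byte offset start. */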
1502 static inline void reg_block_dump(struct adapter *ap, void *buf,
1503                                   unsigned int start, unsigned int end)
1504 {
1505         u32 *p = buf + start;
1506
1507         for (; start <= end; start += sizeof(u32))
1508                 *p++ = t3_read_reg(ap, start);
1509 }
1510
1511 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1512                      void *buf)
1513 {
1514         struct port_info *pi = netdev_priv(dev);
1515         struct adapter *ap = pi->adapter;
1516
1517         /*
1518          * Version scheme:
1519          * bits 0..9: chip version
1520          * bits 10..15: chip revision
1521          * bit 31: set for PCIe cards
1522          */
1523         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1524
1525         /*
1526          * We skip the MAC statistics registers because they are clear-on-read.
1527          * Also reading multi-register stats would need to synchronize with the
1528          * periodic mac stats accumulation.  Hard to justify the complexity.
1529          */
1530         memset(buf, 0, T3_REGMAP_SIZE);
1531         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1532         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1533         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1534         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1535         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1536         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1537                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1538         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1539                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1540 }
1541
1542 static int restart_autoneg(struct net_device *dev)
1543 {
1544         struct port_info *p = netdev_priv(dev);
1545
1546         if (!netif_running(dev))
1547                 return -EAGAIN;
1548         if (p->link_config.autoneg != AUTONEG_ENABLE)
1549                 return -EINVAL;
1550         p->phy.ops->autoneg_restart(&p->phy);
1551         return 0;
1552 }
1553
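/*
 * Identify the adapter for ethtool by blinking the GPIO0-driven LED,
 * toggling it every 500ms for roughly @data seconds (default 2).
 */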
1554 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1555 {
1556         struct port_info *pi = netdev_priv(dev);
1557         struct adapter *adapter = pi->adapter;
1558         int i;
1559
1560         if (data == 0)
1561                 data = 2;
1562
1563         for (i = 0; i < data * 2; i++) {
1564                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1565                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1566                 if (msleep_interruptible(500))
1567                         break;
1568         }
1569         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1570                          F_GPIO0_OUT_VAL);
1571         return 0;
1572 }
1573
1574 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1575 {
1576         struct port_info *p = netdev_priv(dev);
1577
1578         cmd->supported = p->link_config.supported;
1579         cmd->advertising = p->link_config.advertising;
1580
1581         if (netif_carrier_ok(dev)) {
1582                 cmd->speed = p->link_config.speed;
1583                 cmd->duplex = p->link_config.duplex;
1584         } else {
1585                 cmd->speed = -1;
1586                 cmd->duplex = -1;
1587         }
1588
1589         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1590         cmd->phy_address = p->phy.addr;
1591         cmd->transceiver = XCVR_EXTERNAL;
1592         cmd->autoneg = p->link_config.autoneg;
1593         cmd->maxtxpkt = 0;
1594         cmd->maxrxpkt = 0;
1595         return 0;
1596 }
1597
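/*
 * Translate a speed/duplex pair into the matching ethtool SUPPORTED_* bit,
 * or 0 if the combination is not supported.
 */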
1598 static int speed_duplex_to_caps(int speed, int duplex)
1599 {
1600         int cap = 0;
1601
1602         switch (speed) {
1603         case SPEED_10:
1604                 if (duplex == DUPLEX_FULL)
1605                         cap = SUPPORTED_10baseT_Full;
1606                 else
1607                         cap = SUPPORTED_10baseT_Half;
1608                 break;
1609         case SPEED_100:
1610                 if (duplex == DUPLEX_FULL)
1611                         cap = SUPPORTED_100baseT_Full;
1612                 else
1613                         cap = SUPPORTED_100baseT_Half;
1614                 break;
1615         case SPEED_1000:
1616                 if (duplex == DUPLEX_FULL)
1617                         cap = SUPPORTED_1000baseT_Full;
1618                 else
1619                         cap = SUPPORTED_1000baseT_Half;
1620                 break;
1621         case SPEED_10000:
1622                 if (duplex == DUPLEX_FULL)
1623                         cap = SUPPORTED_10000baseT_Full;
1624         }
1625         return cap;
1626 }
1627
1628 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1629                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1630                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1631                       ADVERTISED_10000baseT_Full)
1632
1633 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1634 {
1635         struct port_info *p = netdev_priv(dev);
1636         struct link_config *lc = &p->link_config;
1637
1638         if (!(lc->supported & SUPPORTED_Autoneg)) {
1639                 /*
1640                  * PHY offers a single speed/duplex.  See if that's what's
1641                  * being requested.
1642                  */
1643                 if (cmd->autoneg == AUTONEG_DISABLE) {
1644                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1645                         if (lc->supported & cap)
1646                                 return 0;
1647                 }
1648                 return -EINVAL;
1649         }
1650
1651         if (cmd->autoneg == AUTONEG_DISABLE) {
1652                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1653
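                /* forced gigabit is not supported: 1Gb/s requires autoneg */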
1654                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1655                         return -EINVAL;
1656                 lc->requested_speed = cmd->speed;
1657                 lc->requested_duplex = cmd->duplex;
1658                 lc->advertising = 0;
1659         } else {
1660                 cmd->advertising &= ADVERTISED_MASK;
1661                 cmd->advertising &= lc->supported;
1662                 if (!cmd->advertising)
1663                         return -EINVAL;
1664                 lc->requested_speed = SPEED_INVALID;
1665                 lc->requested_duplex = DUPLEX_INVALID;
1666                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1667         }
1668         lc->autoneg = cmd->autoneg;
1669         if (netif_running(dev))
1670                 t3_link_start(&p->phy, &p->mac, lc);
1671         return 0;
1672 }
1673
1674 static void get_pauseparam(struct net_device *dev,
1675                            struct ethtool_pauseparam *epause)
1676 {
1677         struct port_info *p = netdev_priv(dev);
1678
1679         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1680         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1681         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1682 }
1683
1684 static int set_pauseparam(struct net_device *dev,
1685                           struct ethtool_pauseparam *epause)
1686 {
1687         struct port_info *p = netdev_priv(dev);
1688         struct link_config *lc = &p->link_config;
1689
1690         if (epause->autoneg == AUTONEG_DISABLE)
1691                 lc->requested_fc = 0;
1692         else if (lc->supported & SUPPORTED_Autoneg)
1693                 lc->requested_fc = PAUSE_AUTONEG;
1694         else
1695                 return -EINVAL;
1696
1697         if (epause->rx_pause)
1698                 lc->requested_fc |= PAUSE_RX;
1699         if (epause->tx_pause)
1700                 lc->requested_fc |= PAUSE_TX;
1701         if (lc->autoneg == AUTONEG_ENABLE) {
1702                 if (netif_running(dev))
1703                         t3_link_start(&p->phy, &p->mac, lc);
1704         } else {
1705                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1706                 if (netif_running(dev))
1707                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1708         }
1709         return 0;
1710 }
1711
1712 static u32 get_rx_csum(struct net_device *dev)
1713 {
1714         struct port_info *p = netdev_priv(dev);
1715
1716         return p->rx_offload & T3_RX_CSUM;
1717 }
1718
1719 static int set_rx_csum(struct net_device *dev, u32 data)
1720 {
1721         struct port_info *p = netdev_priv(dev);
1722
1723         if (data) {
1724                 p->rx_offload |= T3_RX_CSUM;
1725         } else {
1726                 int i;
1727
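                /* LRO depends on Rx checksum offload, so disable it as well */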
1728                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1729                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1730                         set_qset_lro(dev, i, 0);
1731         }
1732         return 0;
1733 }
1734
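/*
 * Report the ring sizes; the SGE response queue size is exposed through the
 * otherwise unused rx_mini fields.
 */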
1735 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1736 {
1737         struct port_info *pi = netdev_priv(dev);
1738         struct adapter *adapter = pi->adapter;
1739         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1740
1741         e->rx_max_pending = MAX_RX_BUFFERS;
1742         e->rx_mini_max_pending = 0;
1743         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1744         e->tx_max_pending = MAX_TXQ_ENTRIES;
1745
1746         e->rx_pending = q->fl_size;
1747         e->rx_mini_pending = q->rspq_size;
1748         e->rx_jumbo_pending = q->jumbo_size;
1749         e->tx_pending = q->txq_size[0];
1750 }
1751
1752 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1753 {
1754         struct port_info *pi = netdev_priv(dev);
1755         struct adapter *adapter = pi->adapter;
1756         struct qset_params *q;
1757         int i;
1758
1759         if (e->rx_pending > MAX_RX_BUFFERS ||
1760             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1761             e->tx_pending > MAX_TXQ_ENTRIES ||
1762             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1763             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1764             e->rx_pending < MIN_FL_ENTRIES ||
1765             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1766             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1767                 return -EINVAL;
1768
1769         if (adapter->flags & FULL_INIT_DONE)
1770                 return -EBUSY;
1771
1772         q = &adapter->params.sge.qset[pi->first_qset];
1773         for (i = 0; i < pi->nqsets; ++i, ++q) {
1774                 q->rspq_size = e->rx_mini_pending;
1775                 q->fl_size = e->rx_pending;
1776                 q->jumbo_size = e->rx_jumbo_pending;
1777                 q->txq_size[0] = e->tx_pending;
1778                 q->txq_size[1] = e->tx_pending;
1779                 q->txq_size[2] = e->tx_pending;
1780         }
1781         return 0;
1782 }
1783
1784 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1785 {
1786         struct port_info *pi = netdev_priv(dev);
1787         struct adapter *adapter = pi->adapter;
1788         struct qset_params *qsp = &adapter->params.sge.qset[0];
1789         struct sge_qset *qs = &adapter->sge.qs[0];
1790
1791         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1792                 return -EINVAL;
1793
1794         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1795         t3_update_qset_coalesce(qs, qsp);
1796         return 0;
1797 }
1798
1799 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1800 {
1801         struct port_info *pi = netdev_priv(dev);
1802         struct adapter *adapter = pi->adapter;
1803         struct qset_params *q = adapter->params.sge.qset;
1804
1805         c->rx_coalesce_usecs = q->coalesce_usecs;
1806         return 0;
1807 }
1808
1809 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1810                       u8 * data)
1811 {
1812         struct port_info *pi = netdev_priv(dev);
1813         struct adapter *adapter = pi->adapter;
1814         int i, err = 0;
1815
1816         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1817         if (!buf)
1818                 return -ENOMEM;
1819
1820         e->magic = EEPROM_MAGIC;
1821         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1822                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1823
1824         if (!err)
1825                 memcpy(data, buf + e->offset, e->len);
1826         kfree(buf);
1827         return err;
1828 }
1829
1830 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1831                       u8 * data)
1832 {
1833         struct port_info *pi = netdev_priv(dev);
1834         struct adapter *adapter = pi->adapter;
1835         u32 aligned_offset, aligned_len;
1836         __le32 *p;
1837         u8 *buf;
1838         int err;
1839
1840         if (eeprom->magic != EEPROM_MAGIC)
1841                 return -EINVAL;
1842
1843         aligned_offset = eeprom->offset & ~3;
1844         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1845
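        /*
         * The EEPROM is written a 32-bit word at a time.  For a partial-word
         * request, read the boundary words and splice the user data into an
         * aligned bounce buffer.
         */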
1846         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1847                 buf = kmalloc(aligned_len, GFP_KERNEL);
1848                 if (!buf)
1849                         return -ENOMEM;
1850                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1851                 if (!err && aligned_len > 4)
1852                         err = t3_seeprom_read(adapter,
1853                                               aligned_offset + aligned_len - 4,
1854                                               (__le32 *)&buf[aligned_len - 4]);
1855                 if (err)
1856                         goto out;
1857                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1858         } else
1859                 buf = data;
1860
1861         err = t3_seeprom_wp(adapter, 0);
1862         if (err)
1863                 goto out;
1864
1865         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1866                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1867                 aligned_offset += 4;
1868         }
1869
1870         if (!err)
1871                 err = t3_seeprom_wp(adapter, 1);
1872 out:
1873         if (buf != data)
1874                 kfree(buf);
1875         return err;
1876 }
1877
1878 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1879 {
1880         wol->supported = 0;
1881         wol->wolopts = 0;
1882         memset(&wol->sopass, 0, sizeof(wol->sopass));
1883 }
1884
1885 static const struct ethtool_ops cxgb_ethtool_ops = {
1886         .get_settings = get_settings,
1887         .set_settings = set_settings,
1888         .get_drvinfo = get_drvinfo,
1889         .get_msglevel = get_msglevel,
1890         .set_msglevel = set_msglevel,
1891         .get_ringparam = get_sge_param,
1892         .set_ringparam = set_sge_param,
1893         .get_coalesce = get_coalesce,
1894         .set_coalesce = set_coalesce,
1895         .get_eeprom_len = get_eeprom_len,
1896         .get_eeprom = get_eeprom,
1897         .set_eeprom = set_eeprom,
1898         .get_pauseparam = get_pauseparam,
1899         .set_pauseparam = set_pauseparam,
1900         .get_rx_csum = get_rx_csum,
1901         .set_rx_csum = set_rx_csum,
1902         .set_tx_csum = ethtool_op_set_tx_csum,
1903         .set_sg = ethtool_op_set_sg,
1904         .get_link = ethtool_op_get_link,
1905         .get_strings = get_strings,
1906         .phys_id = cxgb3_phys_id,
1907         .nway_reset = restart_autoneg,
1908         .get_sset_count = get_sset_count,
1909         .get_ethtool_stats = get_stats,
1910         .get_regs_len = get_regs_len,
1911         .get_regs = get_regs,
1912         .get_wol = get_wol,
1913         .set_tso = ethtool_op_set_tso,
1914 };
1915
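/*
 * Range check for the extension ioctls below: negative values mean
 * "leave this parameter unchanged" and always pass.
 */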
1916 static int in_range(int val, int lo, int hi)
1917 {
1918         return val < 0 || (val <= hi && val >= lo);
1919 }
1920
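/*
 * Handler for the driver-private SIOCCHIOCTL ioctl.  The first 32-bit word
 * of the user buffer selects one of the CHELSIO_* operations decoded below.
 * A minimal (hypothetical) userspace sketch:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	ioctl(fd, SIOCCHIOCTL, &ifr);	- edata.val then holds the qset count
 */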
1921 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1922 {
1923         struct port_info *pi = netdev_priv(dev);
1924         struct adapter *adapter = pi->adapter;
1925         u32 cmd;
1926         int ret;
1927
1928         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1929                 return -EFAULT;
1930
1931         switch (cmd) {
1932         case CHELSIO_SET_QSET_PARAMS:{
1933                 int i;
1934                 struct qset_params *q;
1935                 struct ch_qset_params t;
1936                 int q1 = pi->first_qset;
1937                 int nqsets = pi->nqsets;
1938
1939                 if (!capable(CAP_NET_ADMIN))
1940                         return -EPERM;
1941                 if (copy_from_user(&t, useraddr, sizeof(t)))
1942                         return -EFAULT;
1943                 if (t.qset_idx >= SGE_QSETS)
1944                         return -EINVAL;
1945                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1946                         !in_range(t.cong_thres, 0, 255) ||
1947                         !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1948                                 MAX_TXQ_ENTRIES) ||
1949                         !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1950                                 MAX_TXQ_ENTRIES) ||
1951                         !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1952                                 MAX_CTRL_TXQ_ENTRIES) ||
1953                         !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1954                                 MAX_RX_BUFFERS) ||
1955                         !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1956                                 MAX_RX_JUMBO_BUFFERS) ||
1957                         !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1958                                 MAX_RSPQ_ENTRIES))
1959                         return -EINVAL;
1960
1961                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1962                         for_each_port(adapter, i) {
1963                                 pi = adap2pinfo(adapter, i);
1964                                 if (t.qset_idx >= pi->first_qset &&
1965                                     t.qset_idx < pi->first_qset + pi->nqsets &&
1966                                     !(pi->rx_offload & T3_RX_CSUM))
1967                                         return -EINVAL;
1968                         }
1969
1970                 if ((adapter->flags & FULL_INIT_DONE) &&
1971                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1972                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1973                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1974                         t.polling >= 0 || t.cong_thres >= 0))
1975                         return -EBUSY;
1976
1977                 /* Allow setting of any available qset when offload enabled */
1978                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1979                         q1 = 0;
1980                         for_each_port(adapter, i) {
1981                                 pi = adap2pinfo(adapter, i);
1982                                 nqsets += pi->first_qset + pi->nqsets;
1983                         }
1984                 }
1985
1986                 if (t.qset_idx < q1)
1987                         return -EINVAL;
1988                 if (t.qset_idx > q1 + nqsets - 1)
1989                         return -EINVAL;
1990
1991                 q = &adapter->params.sge.qset[t.qset_idx];
1992
1993                 if (t.rspq_size >= 0)
1994                         q->rspq_size = t.rspq_size;
1995                 if (t.fl_size[0] >= 0)
1996                         q->fl_size = t.fl_size[0];
1997                 if (t.fl_size[1] >= 0)
1998                         q->jumbo_size = t.fl_size[1];
1999                 if (t.txq_size[0] >= 0)
2000                         q->txq_size[0] = t.txq_size[0];
2001                 if (t.txq_size[1] >= 0)
2002                         q->txq_size[1] = t.txq_size[1];
2003                 if (t.txq_size[2] >= 0)
2004                         q->txq_size[2] = t.txq_size[2];
2005                 if (t.cong_thres >= 0)
2006                         q->cong_thres = t.cong_thres;
2007                 if (t.intr_lat >= 0) {
2008                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2010
2011                         q->coalesce_usecs = t.intr_lat;
2012                         t3_update_qset_coalesce(qs, q);
2013                 }
2014                 if (t.polling >= 0) {
2015                         if (adapter->flags & USING_MSIX)
2016                                 q->polling = t.polling;
2017                         else {
2018                                 /* No polling with INTx for T3A */
2019                                 if (adapter->params.rev == 0 &&
2020                                         !(adapter->flags & USING_MSI))
2021                                         t.polling = 0;
2022
2023                                 for (i = 0; i < SGE_QSETS; i++) {
2024                                         q = &adapter->params.sge.qset[i];
2026                                         q->polling = t.polling;
2027                                 }
2028                         }
2029                 }
2030                 if (t.lro >= 0)
2031                         set_qset_lro(dev, t.qset_idx, t.lro);
2032
2033                 break;
2034         }
2035         case CHELSIO_GET_QSET_PARAMS:{
2036                 struct qset_params *q;
2037                 struct ch_qset_params t;
2038                 int q1 = pi->first_qset;
2039                 int nqsets = pi->nqsets;
2040                 int i;
2041
2042                 if (copy_from_user(&t, useraddr, sizeof(t)))
2043                         return -EFAULT;
2044
2045                 /* Display qsets for all ports when offload enabled */
2046                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2047                         q1 = 0;
2048                         for_each_port(adapter, i) {
2049                                 pi = adap2pinfo(adapter, i);
2050                                 nqsets = pi->first_qset + pi->nqsets;
2051                         }
2052                 }
2053
2054                 if (t.qset_idx >= nqsets)
2055                         return -EINVAL;
2056
2057                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2058                 t.rspq_size = q->rspq_size;
2059                 t.txq_size[0] = q->txq_size[0];
2060                 t.txq_size[1] = q->txq_size[1];
2061                 t.txq_size[2] = q->txq_size[2];
2062                 t.fl_size[0] = q->fl_size;
2063                 t.fl_size[1] = q->jumbo_size;
2064                 t.polling = q->polling;
2065                 t.lro = q->lro;
2066                 t.intr_lat = q->coalesce_usecs;
2067                 t.cong_thres = q->cong_thres;
2068                 t.qnum = q1;
2069
2070                 if (adapter->flags & USING_MSIX)
2071                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2072                 else
2073                         t.vector = adapter->pdev->irq;
2074
2075                 if (copy_to_user(useraddr, &t, sizeof(t)))
2076                         return -EFAULT;
2077                 break;
2078         }
2079         case CHELSIO_SET_QSET_NUM:{
2080                 struct ch_reg edata;
2081                 unsigned int i, first_qset = 0, other_qsets = 0;
2082
2083                 if (!capable(CAP_NET_ADMIN))
2084                         return -EPERM;
2085                 if (adapter->flags & FULL_INIT_DONE)
2086                         return -EBUSY;
2087                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2088                         return -EFAULT;
2089                 if (edata.val < 1 ||
2090                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2091                         return -EINVAL;
2092
2093                 for_each_port(adapter, i)
2094                         if (adapter->port[i] && adapter->port[i] != dev)
2095                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2096
2097                 if (edata.val + other_qsets > SGE_QSETS)
2098                         return -EINVAL;
2099
2100                 pi->nqsets = edata.val;
2101
2102                 for_each_port(adapter, i)
2103                         if (adapter->port[i]) {
2104                                 pi = adap2pinfo(adapter, i);
2105                                 pi->first_qset = first_qset;
2106                                 first_qset += pi->nqsets;
2107                         }
2108                 break;
2109         }
2110         case CHELSIO_GET_QSET_NUM:{
2111                 struct ch_reg edata;
2112
2113                 edata.cmd = CHELSIO_GET_QSET_NUM;
2114                 edata.val = pi->nqsets;
2115                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2116                         return -EFAULT;
2117                 break;
2118         }
2119         case CHELSIO_LOAD_FW:{
2120                 u8 *fw_data;
2121                 struct ch_mem_range t;
2122
2123                 if (!capable(CAP_SYS_RAWIO))
2124                         return -EPERM;
2125                 if (copy_from_user(&t, useraddr, sizeof(t)))
2126                         return -EFAULT;
2127                 /* XXX: should sanity-check t.len before allocating */
2128                 fw_data = kmalloc(t.len, GFP_KERNEL);
2129                 if (!fw_data)
2130                         return -ENOMEM;
2131
2132                 if (copy_from_user(fw_data, useraddr + sizeof(t),
2133                                    t.len)) {
2134                         kfree(fw_data);
2135                         return -EFAULT;
2136                 }
2137
2138                 ret = t3_load_fw(adapter, fw_data, t.len);
2139                 kfree(fw_data);
2140                 if (ret)
2141                         return ret;
2142                 break;
2143         }
2144         case CHELSIO_SETMTUTAB:{
2145                 struct ch_mtus m;
2146                 int i;
2147
2148                 if (!is_offload(adapter))
2149                         return -EOPNOTSUPP;
2150                 if (!capable(CAP_NET_ADMIN))
2151                         return -EPERM;
2152                 if (offload_running(adapter))
2153                         return -EBUSY;
2154                 if (copy_from_user(&m, useraddr, sizeof(m)))
2155                         return -EFAULT;
2156                 if (m.nmtus != NMTUS)
2157                         return -EINVAL;
2158                 if (m.mtus[0] < 81)     /* accommodate SACK */
2159                         return -EINVAL;
2160
2161                 /* MTUs must be in ascending order */
2162                 for (i = 1; i < NMTUS; ++i)
2163                         if (m.mtus[i] < m.mtus[i - 1])
2164                                 return -EINVAL;
2165
2166                 memcpy(adapter->params.mtus, m.mtus,
2167                         sizeof(adapter->params.mtus));
2168                 break;
2169         }
2170         case CHELSIO_GET_PM:{
2171                 struct tp_params *p = &adapter->params.tp;
2172                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2173
2174                 if (!is_offload(adapter))
2175                         return -EOPNOTSUPP;
2176                 m.tx_pg_sz = p->tx_pg_size;
2177                 m.tx_num_pg = p->tx_num_pgs;
2178                 m.rx_pg_sz = p->rx_pg_size;
2179                 m.rx_num_pg = p->rx_num_pgs;
2180                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2181                 if (copy_to_user(useraddr, &m, sizeof(m)))
2182                         return -EFAULT;
2183                 break;
2184         }
2185         case CHELSIO_SET_PM:{
2186                 struct ch_pm m;
2187                 struct tp_params *p = &adapter->params.tp;
2188
2189                 if (!is_offload(adapter))
2190                         return -EOPNOTSUPP;
2191                 if (!capable(CAP_NET_ADMIN))
2192                         return -EPERM;
2193                 if (adapter->flags & FULL_INIT_DONE)
2194                         return -EBUSY;
2195                 if (copy_from_user(&m, useraddr, sizeof(m)))
2196                         return -EFAULT;
2197                 if (!is_power_of_2(m.rx_pg_sz) ||
2198                         !is_power_of_2(m.tx_pg_sz))
2199                         return -EINVAL; /* not power of 2 */
2200                 if (!(m.rx_pg_sz & 0x14000))
2201                         return -EINVAL; /* not 16KB or 64KB */
2202                 if (!(m.tx_pg_sz & 0x1554000))
2203                         return -EINVAL; /* not a supported tx page size */
2204                 if (m.tx_num_pg == -1)
2205                         m.tx_num_pg = p->tx_num_pgs;
2206                 if (m.rx_num_pg == -1)
2207                         m.rx_num_pg = p->rx_num_pgs;
2208                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2209                         return -EINVAL;
2210                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2211                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2212                         return -EINVAL;
2213                 p->rx_pg_size = m.rx_pg_sz;
2214                 p->tx_pg_size = m.tx_pg_sz;
2215                 p->rx_num_pgs = m.rx_num_pg;
2216                 p->tx_num_pgs = m.tx_num_pg;
2217                 break;
2218         }
2219         case CHELSIO_GET_MEM:{
2220                 struct ch_mem_range t;
2221                 struct mc7 *mem;
2222                 u64 buf[32];
2223
2224                 if (!is_offload(adapter))
2225                         return -EOPNOTSUPP;
2226                 if (!(adapter->flags & FULL_INIT_DONE))
2227                         return -EIO;    /* need the memory controllers */
2228                 if (copy_from_user(&t, useraddr, sizeof(t)))
2229                         return -EFAULT;
2230                 if ((t.addr & 7) || (t.len & 7))
2231                         return -EINVAL;
2232                 if (t.mem_id == MEM_CM)
2233                         mem = &adapter->cm;
2234                 else if (t.mem_id == MEM_PMRX)
2235                         mem = &adapter->pmrx;
2236                 else if (t.mem_id == MEM_PMTX)
2237                         mem = &adapter->pmtx;
2238                 else
2239                         return -EINVAL;
2240
2241                 /*
2242                  * Version scheme:
2243                  * bits 0..9: chip version
2244                  * bits 10..15: chip revision
2245                  */
2246                 t.version = 3 | (adapter->params.rev << 10);
2247                 if (copy_to_user(useraddr, &t, sizeof(t)))
2248                         return -EFAULT;
2249
2250                 /*
2251                  * Read 256 bytes at a time as len can be large and we don't
2252                  * want to use huge intermediate buffers.
2253                  */
2254                 useraddr += sizeof(t);  /* advance to start of buffer */
2255                 while (t.len) {
2256                         unsigned int chunk =
2257                                 min_t(unsigned int, t.len, sizeof(buf));
2258
2259                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2260                                              buf);
2262                         if (ret)
2263                                 return ret;
2264                         if (copy_to_user(useraddr, buf, chunk))
2265                                 return -EFAULT;
2266                         useraddr += chunk;
2267                         t.addr += chunk;
2268                         t.len -= chunk;
2269                 }
2270                 break;
2271         }
2272         case CHELSIO_SET_TRACE_FILTER:{
2273                 struct ch_trace t;
2274                 const struct trace_params *tp;
2275
2276                 if (!capable(CAP_NET_ADMIN))
2277                         return -EPERM;
2278                 if (!offload_running(adapter))
2279                         return -EAGAIN;
2280                 if (copy_from_user(&t, useraddr, sizeof(t)))
2281                         return -EFAULT;
2282
2283                 tp = (const struct trace_params *)&t.sip;
2284                 if (t.config_tx)
2285                         t3_config_trace_filter(adapter, tp, 0,
2286                                                 t.invert_match,
2287                                                 t.trace_tx);
2288                 if (t.config_rx)
2289                         t3_config_trace_filter(adapter, tp, 1,
2290                                                 t.invert_match,
2291                                                 t.trace_rx);
2292                 break;
2293         }
2294         default:
2295                 return -EOPNOTSUPP;
2296         }
2297         return 0;
2298 }
2299
2300 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2301 {
2302         struct mii_ioctl_data *data = if_mii(req);
2303         struct port_info *pi = netdev_priv(dev);
2304         struct adapter *adapter = pi->adapter;
2305         int ret, mmd;
2306
2307         switch (cmd) {
2308         case SIOCGMIIPHY:
2309                 data->phy_id = pi->phy.addr;
2310                 /* FALLTHRU */
2311         case SIOCGMIIREG:{
2312                 u32 val;
2313                 struct cphy *phy = &pi->phy;
2314
2315                 if (!phy->mdio_read)
2316                         return -EOPNOTSUPP;
2317                 if (is_10G(adapter)) {
2318                         mmd = data->phy_id >> 8;
2319                         if (!mmd)
2320                                 mmd = MDIO_DEV_PCS;
2321                         else if (mmd > MDIO_DEV_VEND2)
2322                                 return -EINVAL;
2323
2324                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2325                                              mmd, data->reg_num, &val);
2326                 } else
2327                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2328                                              0, data->reg_num & 0x1f, &val);
2332                 if (!ret)
2333                         data->val_out = val;
2334                 break;
2335         }
2336         case SIOCSMIIREG:{
2337                 struct cphy *phy = &pi->phy;
2338
2339                 if (!capable(CAP_NET_ADMIN))
2340                         return -EPERM;
2341                 if (!phy->mdio_write)
2342                         return -EOPNOTSUPP;
2343                 if (is_10G(adapter)) {
2344                         mmd = data->phy_id >> 8;
2345                         if (!mmd)
2346                                 mmd = MDIO_DEV_PCS;
2347                         else if (mmd > MDIO_DEV_VEND2)
2348                                 return -EINVAL;
2349
2350                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2351                                               mmd, data->reg_num,
2352                                               data->val_in);
2353                 } else
2354                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2355                                               0, data->reg_num & 0x1f,
2356                                               data->val_in);
2361                 break;
2362         }
2363         case SIOCCHIOCTL:
2364                 return cxgb_extension_ioctl(dev, req->ifr_data);
2365         default:
2366                 return -EOPNOTSUPP;
2367         }
2368         return ret;
2369 }
2370
2371 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2372 {
2373         struct port_info *pi = netdev_priv(dev);
2374         struct adapter *adapter = pi->adapter;
2375         int ret;
2376
2377         if (new_mtu < 81)       /* accommodate SACK */
2378                 return -EINVAL;
2379         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2380                 return ret;
2381         dev->mtu = new_mtu;
2382         init_port_mtus(adapter);
2383         if (adapter->params.rev == 0 && offload_running(adapter))
2384                 t3_load_mtus(adapter, adapter->params.mtus,
2385                              adapter->params.a_wnd, adapter->params.b_wnd,
2386                              adapter->port[0]->mtu);
2387         return 0;
2388 }
2389
2390 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2391 {
2392         struct port_info *pi = netdev_priv(dev);
2393         struct adapter *adapter = pi->adapter;
2394         struct sockaddr *addr = p;
2395
2396         if (!is_valid_ether_addr(addr->sa_data))
2397                 return -EINVAL;
2398
2399         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2400         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2401         if (offload_running(adapter))
2402                 write_smt_entry(adapter, pi->port_id);
2403         return 0;
2404 }
2405
2406 /**
2407  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2408  * @adap: the adapter
2409  * @p: the port
2410  *
2411  * Ensures that current Rx processing on any of the queues associated with
2412  * the given port completes before returning.  We do this by acquiring and
2413  * releasing the locks of the response queues associated with the port.
2414  */
2415 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2416 {
2417         int i;
2418
2419         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2420                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2421
2422                 spin_lock_irq(&q->lock);
2423                 spin_unlock_irq(&q->lock);
2424         }
2425 }
2426
2427 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2428 {
2429         struct port_info *pi = netdev_priv(dev);
2430         struct adapter *adapter = pi->adapter;
2431
2432         pi->vlan_grp = grp;
2433         if (adapter->params.rev > 0)
2434                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2435         else {
2436                 /* single control for all ports */
2437                 unsigned int i, have_vlans = 0;
2438                 for_each_port(adapter, i)
2439                         have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2440
2441                 t3_set_vlan_accel(adapter, 1, have_vlans);
2442         }
2443         t3_synchronize_rx(adapter, pi);
2444 }
2445
2446 #ifdef CONFIG_NET_POLL_CONTROLLER
2447 static void cxgb_netpoll(struct net_device *dev)
2448 {
2449         struct port_info *pi = netdev_priv(dev);
2450         struct adapter *adapter = pi->adapter;
2451         int qidx;
2452
2453         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2454                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2455                 void *source;
2456
2457                 if (adapter->flags & USING_MSIX)
2458                         source = qs;
2459                 else
2460                         source = adapter;
2461
2462                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2463         }
2464 }
2465 #endif
2466
2467 /*
2468  * Periodic accumulation of MAC statistics.
2469  */
2470 static void mac_stats_update(struct adapter *adapter)
2471 {
2472         int i;
2473
2474         for_each_port(adapter, i) {
2475                 struct net_device *dev = adapter->port[i];
2476                 struct port_info *p = netdev_priv(dev);
2477
2478                 if (netif_running(dev)) {
2479                         spin_lock(&adapter->stats_lock);
2480                         t3_mac_update_stats(&p->mac);
2481                         spin_unlock(&adapter->stats_lock);
2482                 }
2483         }
2484 }
2485
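/*
 * Poll the link state of ports whose PHYs cannot raise a link-change
 * interrupt (no SUPPORTED_IRQ capability).
 */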
2486 static void check_link_status(struct adapter *adapter)
2487 {
2488         int i;
2489
2490         for_each_port(adapter, i) {
2491                 struct net_device *dev = adapter->port[i];
2492                 struct port_info *p = netdev_priv(dev);
2493
2494                 spin_lock_irq(&adapter->work_lock);
2495                 if (p->link_fault) {
2496                         spin_unlock_irq(&adapter->work_lock);
2497                         continue;
2498                 }
2499                 spin_unlock_irq(&adapter->work_lock);
2500
2501                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2502                         t3_xgm_intr_disable(adapter, i);
2503                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2504
2505                         t3_link_changed(adapter, i);
2506                         t3_xgm_intr_enable(adapter, i);
2507                 }
2508         }
2509 }
2510
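/*
 * Periodic watchdog for T3B2 MACs: a return of 1 from
 * t3b2_mac_watchdog_task() counts as a MAC toggle, 2 means the MAC is
 * reinitialised from scratch.
 */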
2511 static void check_t3b2_mac(struct adapter *adapter)
2512 {
2513         int i;
2514
2515         if (!rtnl_trylock())    /* synchronize with ifdown */
2516                 return;
2517
2518         for_each_port(adapter, i) {
2519                 struct net_device *dev = adapter->port[i];
2520                 struct port_info *p = netdev_priv(dev);
2521                 int status;
2522
2523                 if (!netif_running(dev))
2524                         continue;
2525
2526                 status = 0;
2527                 if (netif_carrier_ok(dev))      /* running was checked above */
2528                         status = t3b2_mac_watchdog_task(&p->mac);
2529                 if (status == 1)
2530                         p->mac.stats.num_toggled++;
2531                 else if (status == 2) {
2532                         struct cmac *mac = &p->mac;
2533
2534                         t3_mac_set_mtu(mac, dev->mtu);
2535                         t3_mac_set_address(mac, 0, dev->dev_addr);
2536                         cxgb_set_rxmode(dev);
2537                         t3_link_start(&p->phy, mac, &p->link_config);
2538                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2539                         t3_port_intr_enable(adapter, p->port_id);
2540                         p->mac.stats.num_resets++;
2541                 }
2542         }
2543         rtnl_unlock();
2544 }
2545
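/*
 * Periodic housekeeping: poll link state where needed, accumulate MAC
 * statistics, run the T3B2 MAC watchdog and clear polled interrupt causes.
 */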
2547 static void t3_adap_check_task(struct work_struct *work)
2548 {
2549         struct adapter *adapter = container_of(work, struct adapter,
2550                                                adap_check_task.work);
2551         const struct adapter_params *p = &adapter->params;
2552         int port;
2553         unsigned int v, status, reset;
2554
2555         adapter->check_task_cnt++;
2556
2557         /* Check link status for PHYs without interrupts */
2558         if (p->linkpoll_period)
2559                 check_link_status(adapter);
2560
2561         /* Accumulate MAC stats if needed */
2562         if (!p->linkpoll_period ||
2563             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2564             p->stats_update_period) {
2565                 mac_stats_update(adapter);
2566                 adapter->check_task_cnt = 0;
2567         }
2568
2569         if (p->rev == T3_REV_B2)
2570                 check_t3b2_mac(adapter);
2571
2572         /*
2573          * Scan the XGMACs for conditions we want to monitor by periodic
2574          * polling rather than via an interrupt.  Such conditions would
2575          * otherwise flood the system with interrupts, and we only really
2576          * need to know that they are occurring: for each condition we
2577          * count its detections and reset the cause bit for the next
2578          * polling pass.
2579          */
2580         for_each_port(adapter, port) {
2581                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2582                 u32 cause;
2583
2584                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2585                 reset = 0;
2586                 if (cause & F_RXFIFO_OVERFLOW) {
2587                         mac->stats.rx_fifo_ovfl++;
2588                         reset |= F_RXFIFO_OVERFLOW;
2589                 }
2590
2591                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2592         }
2593
2594         /*
2595          * We do the same as above for FL_EMPTY interrupts.
2596          */
2597         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2598         reset = 0;
2599
2600         if (status & F_FLEMPTY) {
2601                 struct sge_qset *qs = &adapter->sge.qs[0];
2602                 int i = 0;
2603
2604                 reset |= F_FLEMPTY;
2605
2606                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2607                     0xffff;
2608
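                /*
                 * Even status bits map to fl[0] and odd bits to fl[1] of
                 * successive qsets, so alternate between the two free lists
                 * and advance to the next qset after every odd bit.
                 */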
2609                 while (v) {
2610                         qs->fl[i].empty += (v & 1);
2611                         if (i)
2612                                 qs++;
2613                         i ^= 1;
2614                         v >>= 1;
2615                 }
2616         }
2617
2618         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2619
2620         /* Schedule the next check update if any port is active. */
2621         spin_lock_irq(&adapter->work_lock);
2622         if (adapter->open_device_map & PORT_MASK)
2623                 schedule_chk_task(adapter);
2624         spin_unlock_irq(&adapter->work_lock);
2625 }
2626
2627 /*
2628  * Processes external (PHY) interrupts in process context.
2629  */
2630 static void ext_intr_task(struct work_struct *work)
2631 {
2632         struct adapter *adapter = container_of(work, struct adapter,
2633                                                ext_intr_handler_task);
2634         int i;
2635
2636         /* Disable link fault interrupts */
2637         for_each_port(adapter, i) {
2638                 struct net_device *dev = adapter->port[i];
2639                 struct port_info *p = netdev_priv(dev);
2640
2641                 t3_xgm_intr_disable(adapter, i);
2642                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2643         }
2644
2645         /* Re-enable link fault interrupts */
2646         t3_phy_intr_handler(adapter);
2647
2648         for_each_port(adapter, i)
2649                 t3_xgm_intr_enable(adapter, i);
2650
2651         /* Now reenable external interrupts */
2652         spin_lock_irq(&adapter->work_lock);
2653         if (adapter->slow_intr_mask) {
2654                 adapter->slow_intr_mask |= F_T3DBG;
2655                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2656                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2657                              adapter->slow_intr_mask);
2658         }
2659         spin_unlock_irq(&adapter->work_lock);
2660 }
2661
2662 /*
2663  * Interrupt-context handler for external (PHY) interrupts.
2664  */
2665 void t3_os_ext_intr_handler(struct adapter *adapter)
2666 {
2667         /*
2668          * Schedule a task to handle external interrupts as they may be slow
2669          * and we use a mutex to protect MDIO registers.  We disable PHY
2670          * interrupts in the meantime and let the task reenable them when
2671          * it's done.
2672          */
2673         spin_lock(&adapter->work_lock);
2674         if (adapter->slow_intr_mask) {
2675                 adapter->slow_intr_mask &= ~F_T3DBG;
2676                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2677                              adapter->slow_intr_mask);
2678                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2679         }
2680         spin_unlock(&adapter->work_lock);
2681 }
2682
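/*
 * Process-context handler that services every port flagged with a link
 * fault.
 */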
2683 static void link_fault_task(struct work_struct *work)
2684 {
2685         struct adapter *adapter = container_of(work, struct adapter,
2686                                                link_fault_handler_task);
2687         int i;
2688
2689         for_each_port(adapter, i) {
2690                 struct net_device *netdev = adapter->port[i];
2691                 struct port_info *pi = netdev_priv(netdev);
2692
2693                 if (pi->link_fault)
2694                         t3_link_fault(adapter, i);
2695         }
2696 }
2697
2698 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2699 {
2700         struct net_device *netdev = adapter->port[port_id];
2701         struct port_info *pi = netdev_priv(netdev);
2702
2703         spin_lock(&adapter->work_lock);
2704         pi->link_fault = 1;
2705         queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
2706         spin_unlock(&adapter->work_lock);
2707 }
2708
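/*
 * Quiesce the adapter after an error: notify the offload layer, close all
 * ports, stop the SGE timers, optionally reset the chip, and disable the
 * PCI device.
 */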
2709 static int t3_adapter_error(struct adapter *adapter, int reset)
2710 {
2711         int i, ret = 0;
2712
2713         if (is_offload(adapter) &&
2714             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2715                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2716                 offload_close(&adapter->tdev);
2717         }
2718
2719         /* Stop all ports */
2720         for_each_port(adapter, i) {
2721                 struct net_device *netdev = adapter->port[i];
2722
2723                 if (netif_running(netdev))
2724                         cxgb_close(netdev);
2725         }
2726
2727         /* Stop SGE timers */
2728         t3_stop_sge_timers(adapter);
2729
2730         adapter->flags &= ~FULL_INIT_DONE;
2731
2732         if (reset)
2733                 ret = t3_reset_adapter(adapter);
2734
2735         pci_disable_device(adapter->pdev);
2736
2737         return ret;
2738 }
2739
2740 static int t3_reenable_adapter(struct adapter *adapter)
2741 {
2742         if (pci_enable_device(adapter->pdev)) {
2743                 dev_err(&adapter->pdev->dev,
2744                         "Cannot re-enable PCI device after reset.\n");
2745                 goto err;
2746         }
2747         pci_set_master(adapter->pdev);
2748         pci_restore_state(adapter->pdev);
2749
2750         /* Free sge resources */
2751         t3_free_sge_resources(adapter);
2752
2753         if (t3_replay_prep_adapter(adapter))
2754                 goto err;
2755
2756         return 0;
2757 err:
2758         return -1;
2759 }
2760
2761 static void t3_resume_ports(struct adapter *adapter)
2762 {
2763         int i;
2764
2765         /* Restart the ports */
2766         for_each_port(adapter, i) {
2767                 struct net_device *netdev = adapter->port[i];
2768
2769                 if (netif_running(netdev)) {
2770                         if (cxgb_open(netdev)) {
2771                                 dev_err(&adapter->pdev->dev,
2772                                         "can't bring device back up"
2773                                         " after reset\n");
2774                                 continue;
2775                         }
2776                 }
2777         }
2778
2779         if (is_offload(adapter) && !ofld_disable)
2780                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2781 }
2782
2783 /*
2784  * Process a fatal error:
2785  * bring the ports down, reset the chip, then bring the ports back up.
2786  */
2787 static void fatal_error_task(struct work_struct *work)
2788 {
2789         struct adapter *adapter = container_of(work, struct adapter,
2790                                                fatal_error_handler_task);
2791         int err = 0;
2792
2793         rtnl_lock();
2794         err = t3_adapter_error(adapter, 1);
2795         if (!err)
2796                 err = t3_reenable_adapter(adapter);
2797         if (!err)
2798                 t3_resume_ports(adapter);
2799
2800         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2801         rtnl_unlock();
2802 }
2803
2804 void t3_fatal_err(struct adapter *adapter)
2805 {
2806         unsigned int fw_status[4];
2807
2808         if (adapter->flags & FULL_INIT_DONE) {
2809                 t3_sge_stop(adapter);
2810                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2811                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2812                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2813                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2814
2815                 spin_lock(&adapter->work_lock);
2816                 t3_intr_disable(adapter);
2817                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2818                 spin_unlock(&adapter->work_lock);
2819         }
2820         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2821         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2822                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2823                          fw_status[0], fw_status[1],
2824                          fw_status[2], fw_status[3]);
2825 }
2826
2827 /**
2828  * t3_io_error_detected - called when PCI error is detected
2829  * @pdev: Pointer to PCI device
2830  * @state: The current pci connection state
2831  *
2832  * This function is called after a PCI bus error affecting
2833  * this device has been detected.
2834  */
2835 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2836                                              pci_channel_state_t state)
2837 {
2838         struct adapter *adapter = pci_get_drvdata(pdev);
2839         int ret;
2840
2841         ret = t3_adapter_error(adapter, 0);
2842
2843         /* Request a slot reset. */
2844         return PCI_ERS_RESULT_NEED_RESET;
2845 }
2846
2847 /**
2848  * t3_io_slot_reset - called after the pci bus has been reset.
2849  * @pdev: Pointer to PCI device
2850  *
2851  * Restart the card from scratch, as if from a cold-boot.
2852  */
2853 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2854 {
2855         struct adapter *adapter = pci_get_drvdata(pdev);
2856
2857         if (!t3_reenable_adapter(adapter))
2858                 return PCI_ERS_RESULT_RECOVERED;
2859
2860         return PCI_ERS_RESULT_DISCONNECT;
2861 }
2862
2863 /**
2864  * t3_io_resume - called when traffic can start flowing again.
2865  * @pdev: Pointer to PCI device
2866  *
2867  * This callback is called when the error recovery driver tells us that
2868  * it's OK to resume normal operation.
2869  */
2870 static void t3_io_resume(struct pci_dev *pdev)
2871 {
2872         struct adapter *adapter = pci_get_drvdata(pdev);
2873
2874         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2875                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
2876
2877         t3_resume_ports(adapter);
2878 }
2879
2880 static struct pci_error_handlers t3_err_handler = {
2881         .error_detected = t3_io_error_detected,
2882         .slot_reset = t3_io_slot_reset,
2883         .resume = t3_io_resume,
2884 };
2885
2886 /*
2887  * Set the number of qsets based on the number of CPUs and the number of ports,
2888  * not to exceed the number of available qsets, assuming there are enough qsets
2889  * per port in HW.
2890  */
2891 static void set_nqsets(struct adapter *adap)
2892 {
2893         int i, j = 0;
2894         int num_cpus = num_online_cpus();
2895         int hwports = adap->params.nports;
2896         int nqsets = adap->msix_nvectors - 1;
2897
2898         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2899                 if (hwports == 2 &&
2900                     (hwports * nqsets > SGE_QSETS ||
2901                      num_cpus >= nqsets / hwports))
2902                         nqsets /= hwports;
2903                 if (nqsets > num_cpus)
2904                         nqsets = num_cpus;
2905                 if (nqsets < 1 || hwports == 4)
2906                         nqsets = 1;
2907         } else
2908                 nqsets = 1;
2909
2910         for_each_port(adap, i) {
2911                 struct port_info *pi = adap2pinfo(adap, i);
2912
2913                 pi->first_qset = j;
2914                 pi->nqsets = nqsets;
2915                 j = pi->first_qset + nqsets;
2916
2917                 dev_info(&adap->pdev->dev,
2918                          "Port %d using %d queue sets.\n", i, nqsets);
2919         }
2920 }
2921
2922 static int __devinit cxgb_enable_msix(struct adapter *adap)
2923 {
2924         struct msix_entry entries[SGE_QSETS + 1];
2925         int vectors;
2926         int i, err;
2927
2928         vectors = ARRAY_SIZE(entries);
2929         for (i = 0; i < vectors; ++i)
2930                 entries[i].entry = i;
2931
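        /*
         * On failure pci_enable_msix() returns the number of vectors it
         * could have allocated, so retry with successively smaller requests.
         */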
2932         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2933                 vectors = err;
2934
2935         if (!err && vectors < (adap->params.nports + 1))
2936                 err = -1;
2937
2938         if (!err) {
2939                 for (i = 0; i < vectors; ++i)
2940                         adap->msix_info[i].vec = entries[i].vector;
2941                 adap->msix_nvectors = vectors;
2942         }
2943
2944         return err;
2945 }
2946
2947 static void __devinit print_port_info(struct adapter *adap,
2948                                       const struct adapter_info *ai)
2949 {
2950         static const char *pci_variant[] = {
2951                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2952         };
2953
2954         int i;
2955         char buf[80];
2956
2957         if (is_pcie(adap))
2958                 snprintf(buf, sizeof(buf), "%s x%d",
2959                          pci_variant[adap->params.pci.variant],
2960                          adap->params.pci.width);
2961         else
2962                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2963                          pci_variant[adap->params.pci.variant],
2964                          adap->params.pci.speed, adap->params.pci.width);
2965
2966         for_each_port(adap, i) {
2967                 struct net_device *dev = adap->port[i];
2968                 const struct port_info *pi = netdev_priv(dev);
2969
2970                 if (!test_bit(i, &adap->registered_device_map))
2971                         continue;
2972                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2973                        dev->name, ai->desc, pi->phy.desc,
2974                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2975                        (adap->flags & USING_MSIX) ? " MSI-X" :
2976                        (adap->flags & USING_MSI) ? " MSI" : "");
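                /*
                 * adap->name points at the name of the first successfully
                 * registered port, so the adapter-wide banner below is
                 * printed only once.
                 */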
2977                 if (adap->name == dev->name && adap->params.vpd.mclk)
2978                         printk(KERN_INFO
2979                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2980                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2981                                t3_mc7_size(&adap->pmtx) >> 20,
2982                                t3_mc7_size(&adap->pmrx) >> 20,
2983                                adap->params.vpd.sn);
2984         }
2985 }
2986
2987 static const struct net_device_ops cxgb_netdev_ops = {
2988         .ndo_open               = cxgb_open,
2989         .ndo_stop               = cxgb_close,
2990         .ndo_start_xmit         = t3_eth_xmit,
2991         .ndo_get_stats          = cxgb_get_stats,
2992         .ndo_validate_addr      = eth_validate_addr,
2993         .ndo_set_multicast_list = cxgb_set_rxmode,
2994         .ndo_do_ioctl           = cxgb_ioctl,
2995         .ndo_change_mtu         = cxgb_change_mtu,
2996         .ndo_set_mac_address    = cxgb_set_mac_addr,
2997         .ndo_vlan_rx_register   = vlan_rx_register,
2998 #ifdef CONFIG_NET_POLL_CONTROLLER
2999         .ndo_poll_controller    = cxgb_netpoll,
3000 #endif
3001 };
3002
3003 static int __devinit init_one(struct pci_dev *pdev,
3004                               const struct pci_device_id *ent)
3005 {
3006         static int version_printed;
3007
3008         int i, err, pci_using_dac = 0;
3009         resource_size_t mmio_start, mmio_len;
3010         const struct adapter_info *ai;
3011         struct adapter *adapter = NULL;
3012         struct port_info *pi;
3013
3014         if (!version_printed) {
3015                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3016                 ++version_printed;
3017         }
3018
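        /*
         * The single-threaded workqueue is shared by all cxgb3 adapters;
         * it is created on the first probe and destroyed in
         * cxgb3_cleanup_module().
         */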
3019         if (!cxgb3_wq) {
3020                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3021                 if (!cxgb3_wq) {
3022                         printk(KERN_ERR DRV_NAME
3023                                ": cannot initialize work queue\n");
3024                         return -ENOMEM;
3025                 }
3026         }
3027
3028         err = pci_request_regions(pdev, DRV_NAME);
3029         if (err) {
3030                 /* Just info, some other driver may have claimed the device. */
3031                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3032                 return err;
3033         }
3034
3035         err = pci_enable_device(pdev);
3036         if (err) {
3037                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3038                 goto out_release_regions;
3039         }
3040
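        /*
         * Prefer a 64-bit DMA mask; pci_using_dac lets us set
         * NETIF_F_HIGHDMA on the ports below.  Fall back to 32-bit DMA
         * if the platform cannot do 64-bit addressing.
         */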
3041         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3042                 pci_using_dac = 1;
3043                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3044                 if (err) {
3045                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3046                                "coherent allocations\n");
3047                         goto out_disable_device;
3048                 }
3049         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3050                 dev_err(&pdev->dev, "no usable DMA configuration\n");
3051                 goto out_disable_device;
3052         }
3053
3054         pci_set_master(pdev);
3055         pci_save_state(pdev);
3056
3057         mmio_start = pci_resource_start(pdev, 0);
3058         mmio_len = pci_resource_len(pdev, 0);
3059         ai = t3_get_adapter_info(ent->driver_data);
3060
3061         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3062         if (!adapter) {
3063                 err = -ENOMEM;
3064                 goto out_disable_device;
3065         }
3066
3067         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3068         if (!adapter->regs) {
3069                 dev_err(&pdev->dev, "cannot map device registers\n");
3070                 err = -ENOMEM;
3071                 goto out_free_adapter;
3072         }
3073
3074         adapter->pdev = pdev;
3075         adapter->name = pci_name(pdev);
3076         adapter->msg_enable = dflt_msg_enable;
3077         adapter->mmio_len = mmio_len;
3078
3079         mutex_init(&adapter->mdio_lock);
3080         spin_lock_init(&adapter->work_lock);
3081         spin_lock_init(&adapter->stats_lock);
3082
3083         INIT_LIST_HEAD(&adapter->adapter_list);
3084         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3085         INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
3086         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3087         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3088
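        /*
         * Allocate each net device with the maximum number of TX queues
         * the SGE supports; set_nqsets() later decides how many queue
         * sets each port actually uses.
         */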
3089         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3090                 struct net_device *netdev;
3091
3092                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3093                 if (!netdev) {
3094                         err = -ENOMEM;
3095                         goto out_free_dev;
3096                 }
3097
3098                 SET_NETDEV_DEV(netdev, &pdev->dev);
3099
3100                 adapter->port[i] = netdev;
3101                 pi = netdev_priv(netdev);
3102                 pi->adapter = adapter;
3103                 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3104                 pi->port_id = i;
3105                 netif_carrier_off(netdev);
3106                 netif_tx_stop_all_queues(netdev);
3107                 netdev->irq = pdev->irq;
3108                 netdev->mem_start = mmio_start;
3109                 netdev->mem_end = mmio_start + mmio_len - 1;
3110                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3111                 netdev->features |= NETIF_F_LLTX;
3112                 netdev->features |= NETIF_F_GRO;
3113                 if (pci_using_dac)
3114                         netdev->features |= NETIF_F_HIGHDMA;
3115
3116                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3117                 netdev->netdev_ops = &cxgb_netdev_ops;
3118                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3119         }
3120
3121         pci_set_drvdata(pdev, adapter);
3122         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3123                 err = -ENODEV;
3124                 goto out_free_dev;
3125         }
3126
3127         /*
3128          * The card is now ready to go.  If any errors occur during device
3129          * registration we do not fail the whole card but rather proceed only
3130          * with the ports we manage to register successfully.  However we must
3131          * register at least one net device.
3132          */
3133         for_each_port(adapter, i) {
3134                 err = register_netdev(adapter->port[i]);
3135                 if (err)
3136                         dev_warn(&pdev->dev,
3137                                  "cannot register net device %s, skipping\n",
3138                                  adapter->port[i]->name);
3139                 else {
3140                         /*
3141                          * Change the name we use for messages to the name of
3142                          * the first successfully registered interface.
3143                          */
3144                         if (!adapter->registered_device_map)
3145                                 adapter->name = adapter->port[i]->name;
3146
3147                         __set_bit(i, &adapter->registered_device_map);
3148                 }
3149         }
3150         if (!adapter->registered_device_map) {
3151                 dev_err(&pdev->dev, "could not register any net devices\n");
3152                 goto out_free_dev;
3153         }
3154
3155         /* Driver is ready; reflect it on the LEDs. */
3156         t3_led_ready(adapter);
3157
3158         if (is_offload(adapter)) {
3159                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3160                 cxgb3_adapter_ofld(adapter);
3161         }
3162
3163         /* See what interrupts we'll be using */
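        /*
         * msi > 1 tries MSI-X first; with msi > 0 we fall back to MSI,
         * and otherwise the driver uses legacy INTx.
         */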
3164         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3165                 adapter->flags |= USING_MSIX;
3166         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3167                 adapter->flags |= USING_MSI;
3168
3169         set_nqsets(adapter);
3170
3171         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3172                                  &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs group\n");
3173
3174         print_port_info(adapter, ai);
3175         return 0;
3176
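        /*
         * Error unwind: no net device has been registered by the time we
         * reach out_free_dev, so the ports only need to be freed.
         */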
3177 out_free_dev:
3178         iounmap(adapter->regs);
3179         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3180                 if (adapter->port[i])
3181                         free_netdev(adapter->port[i]);
3182
3183 out_free_adapter:
3184         kfree(adapter);
3185
3186 out_disable_device:
3187         pci_disable_device(pdev);
3188 out_release_regions:
3189         pci_release_regions(pdev);
3190         pci_set_drvdata(pdev, NULL);
3191         return err;
3192 }
3193
3194 static void __devexit remove_one(struct pci_dev *pdev)
3195 {
3196         struct adapter *adapter = pci_get_drvdata(pdev);
3197
3198         if (adapter) {
3199                 int i;
3200
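                /*
                 * Tear down in roughly the reverse order of init_one():
                 * quiesce the SGE, detach offload, unregister the ports,
                 * then free interrupts, memory, and PCI resources.
                 */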
3201                 t3_sge_stop(adapter);
3202                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3203                                    &cxgb3_attr_group);
3204
3205                 if (is_offload(adapter)) {
3206                         cxgb3_adapter_unofld(adapter);
3207                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3208                                      &adapter->open_device_map))
3209                                 offload_close(&adapter->tdev);
3210                 }
3211
3212                 for_each_port(adapter, i)
3213                         if (test_bit(i, &adapter->registered_device_map))
3214                                 unregister_netdev(adapter->port[i]);
3215
3216                 t3_stop_sge_timers(adapter);
3217                 t3_free_sge_resources(adapter);
3218                 cxgb_disable_msi(adapter);
3219
3220                 for_each_port(adapter, i)
3221                         if (adapter->port[i])
3222                                 free_netdev(adapter->port[i]);
3223
3224                 iounmap(adapter->regs);
3225                 kfree(adapter);
3226                 pci_release_regions(pdev);
3227                 pci_disable_device(pdev);
3228                 pci_set_drvdata(pdev, NULL);
3229         }
3230 }
3231
3232 static struct pci_driver driver = {
3233         .name = DRV_NAME,
3234         .id_table = cxgb3_pci_tbl,
3235         .probe = init_one,
3236         .remove = __devexit_p(remove_one),
3237         .err_handler = &t3_err_handler,
3238 };
3239
3240 static int __init cxgb3_init_module(void)
3241 {
3242         cxgb3_offload_init();
3243
3244         return pci_register_driver(&driver);
3245 }
3249
3250 static void __exit cxgb3_cleanup_module(void)
3251 {
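        /*
         * Unregister the driver first so every remove_one() has run and
         * no new work can be queued before the workqueue is destroyed.
         */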
3252         pci_unregister_driver(&driver);
3253         if (cxgb3_wq)
3254                 destroy_workqueue(cxgb3_wq);
3255 }
3256
3257 module_init(cxgb3_init_module);
3258 module_exit(cxgb3_cleanup_module);