1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
48
49 #include "common.h"
50 #include "cxgb3_ioctl.h"
51 #include "regs.h"
52 #include "cxgb3_offload.h"
53 #include "version.h"
54
55 #include "cxgb3_ctl_defs.h"
56 #include "t3_cpl.h"
57 #include "firmware_exports.h"
58
59 enum {
60         MAX_TXQ_ENTRIES = 16384,
61         MAX_CTRL_TXQ_ENTRIES = 1024,
62         MAX_RSPQ_ENTRIES = 16384,
63         MAX_RX_BUFFERS = 16384,
64         MAX_RX_JUMBO_BUFFERS = 16384,
65         MIN_TXQ_ENTRIES = 4,
66         MIN_CTRL_TXQ_ENTRIES = 4,
67         MIN_RSPQ_ENTRIES = 32,
68         MIN_FL_ENTRIES = 32
69 };
70
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77 #define EEPROM_MAGIC 0x38E2F10C
78
79 #define CH_DEVICE(devid, idx) \
80         { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
81
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83         CH_DEVICE(0x20, 0),     /* PE9000 */
84         CH_DEVICE(0x21, 1),     /* T302E */
85         CH_DEVICE(0x22, 2),     /* T310E */
86         CH_DEVICE(0x23, 3),     /* T320X */
87         CH_DEVICE(0x24, 1),     /* T302X */
88         CH_DEVICE(0x25, 3),     /* T320E */
89         CH_DEVICE(0x26, 2),     /* T310X */
90         CH_DEVICE(0x30, 2),     /* T3B10 */
91         CH_DEVICE(0x31, 3),     /* T3B20 */
92         CH_DEVICE(0x32, 1),     /* T3B02 */
93         CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
94         {0,}
95 };
96
97 MODULE_DESCRIPTION(DRV_DESC);
98 MODULE_AUTHOR("Chelsio Communications");
99 MODULE_LICENSE("Dual BSD/GPL");
100 MODULE_VERSION(DRV_VERSION);
101 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102
103 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104
105 module_param(dflt_msg_enable, int, 0644);
106 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
107
108 /*
109  * The driver uses the best interrupt scheme available on a platform in the
110  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
111  * of these schemes the driver may consider as follows:
112  *
113  * msi = 2: choose from among all three options
114  * msi = 1: only consider MSI and pin interrupts
115  * msi = 0: force pin interrupts
116  */
117 static int msi = 2;
118
119 module_param(msi, int, 0644);
120 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
121
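/*
 * A minimal sketch (not the driver's actual probe path) of how a parameter
 * like "msi" above is typically honoured: try MSI-X first, fall back to
 * MSI, then to legacy pin interrupts.  The return codes and the nvec
 * argument here are illustrative assumptions, not cxgb3's bookkeeping.
 */
static int example_pick_intr_scheme(struct pci_dev *pdev,
                                    struct msix_entry *entries, int nvec)
{
        if (msi > 1 && pci_enable_msix(pdev, entries, nvec) == 0)
                return 2;       /* MSI-X enabled */
        if (msi > 0 && pci_enable_msi(pdev) == 0)
                return 1;       /* MSI enabled */
        return 0;               /* stay with legacy INTx */
}
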
122 /*
123  * The driver enables offload by default.
124  * To disable it, use ofld_disable = 1.
125  */
126
127 static int ofld_disable = 0;
128
129 module_param(ofld_disable, int, 0644);
130 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
131
132 /*
133  * We have work elements that we need to cancel when an interface is taken
134  * down.  Normally the work elements would be executed by keventd but that
135  * can deadlock because of linkwatch.  If our close method takes the rtnl
136  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
137  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
138  * for our work to complete.  Get our own work queue to solve this.
139  */
140 static struct workqueue_struct *cxgb3_wq;
141
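/*
 * A sketch, with illustrative example_* names, of how such a private
 * workqueue is created and torn down; the real driver does the equivalent
 * in its module init/exit paths.
 */
static int __init example_wq_init(void)
{
        cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
        return cxgb3_wq ? 0 : -ENOMEM;
}

static void __exit example_wq_exit(void)
{
        destroy_workqueue(cxgb3_wq);    /* flushes any remaining work */
}
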
142 /**
143  *      link_report - show link status and link speed/duplex
144  *      @dev: the net device of the port whose settings are to be reported
145  *
146  *      Shows the link status, speed, and duplex of a port.
147  */
148 static void link_report(struct net_device *dev)
149 {
150         if (!netif_carrier_ok(dev))
151                 printk(KERN_INFO "%s: link down\n", dev->name);
152         else {
153                 const char *s = "10Mbps";
154                 const struct port_info *p = netdev_priv(dev);
155
156                 switch (p->link_config.speed) {
157                 case SPEED_10000:
158                         s = "10Gbps";
159                         break;
160                 case SPEED_1000:
161                         s = "1000Mbps";
162                         break;
163                 case SPEED_100:
164                         s = "100Mbps";
165                         break;
166                 }
167
168                 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
169                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
170         }
171 }
172
173 /**
174  *      t3_os_link_changed - handle link status changes
175  *      @adapter: the adapter associated with the link change
176  *      @port_id: the port index whose link status has changed
177  *      @link_stat: the new status of the link
178  *      @speed: the new speed setting
179  *      @duplex: the new duplex setting
180  *      @pause: the new flow-control setting
181  *
182  *      This is the OS-dependent handler for link status changes.  The OS
183  *      neutral handler takes care of most of the processing for these events,
184  *      then calls this handler for any OS-specific processing.
185  */
186 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
187                         int speed, int duplex, int pause)
188 {
189         struct net_device *dev = adapter->port[port_id];
190         struct port_info *pi = netdev_priv(dev);
191         struct cmac *mac = &pi->mac;
192
193         /* Skip changes from disabled ports. */
194         if (!netif_running(dev))
195                 return;
196
197         if (link_stat != netif_carrier_ok(dev)) {
198                 if (link_stat) {
199                         t3_mac_enable(mac, MAC_DIRECTION_RX);
200                         netif_carrier_on(dev);
201                 } else {
202                         netif_carrier_off(dev);
203                         pi->phy.ops->power_down(&pi->phy, 1);
204                         t3_mac_disable(mac, MAC_DIRECTION_RX);
205                         t3_link_start(&pi->phy, mac, &pi->link_config);
206                 }
207
208                 link_report(dev);
209         }
210 }
211
212 /**
213  *      t3_os_phymod_changed - handle PHY module changes
214  *      @adap: the adapter whose PHY reported the module change
215  *      @port_id: the port index of the reporting PHY
216  *
217  *      This is the OS-dependent handler for PHY module changes.  It is
218  *      invoked when a PHY module is removed or inserted for any OS-specific
219  *      processing.
220  */
221 void t3_os_phymod_changed(struct adapter *adap, int port_id)
222 {
223         static const char *mod_str[] = {
224                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
225         };
226
227         const struct net_device *dev = adap->port[port_id];
228         const struct port_info *pi = netdev_priv(dev);
229
230         if (pi->phy.modtype == phy_modtype_none)
231                 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
232         else
233                 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
234                        mod_str[pi->phy.modtype]);
235 }
236
237 static void cxgb_set_rxmode(struct net_device *dev)
238 {
239         struct t3_rx_mode rm;
240         struct port_info *pi = netdev_priv(dev);
241
242         init_rx_mode(&rm, dev, dev->mc_list);
243         t3_mac_set_rx_mode(&pi->mac, &rm);
244 }
245
246 /**
247  *      link_start - enable a port
248  *      @dev: the device to enable
249  *
250  *      Performs the MAC and PHY actions needed to enable a port.
251  */
252 static void link_start(struct net_device *dev)
253 {
254         struct t3_rx_mode rm;
255         struct port_info *pi = netdev_priv(dev);
256         struct cmac *mac = &pi->mac;
257
258         init_rx_mode(&rm, dev, dev->mc_list);
259         t3_mac_reset(mac);
260         t3_mac_set_mtu(mac, dev->mtu);
261         t3_mac_set_address(mac, 0, dev->dev_addr);
262         t3_mac_set_rx_mode(mac, &rm);
263         t3_link_start(&pi->phy, mac, &pi->link_config);
264         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
265 }
266
267 static inline void cxgb_disable_msi(struct adapter *adapter)
268 {
269         if (adapter->flags & USING_MSIX) {
270                 pci_disable_msix(adapter->pdev);
271                 adapter->flags &= ~USING_MSIX;
272         } else if (adapter->flags & USING_MSI) {
273                 pci_disable_msi(adapter->pdev);
274                 adapter->flags &= ~USING_MSI;
275         }
276 }
277
278 /*
279  * Interrupt handler for asynchronous events used with MSI-X.
280  */
281 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
282 {
283         t3_slow_intr_handler(cookie);
284         return IRQ_HANDLED;
285 }
286
287 /*
288  * Name the MSI-X interrupts.
289  */
290 static void name_msix_vecs(struct adapter *adap)
291 {
292         int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
293
294         snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
295         adap->msix_info[0].desc[n] = 0;
296
297         for_each_port(adap, j) {
298                 struct net_device *d = adap->port[j];
299                 const struct port_info *pi = netdev_priv(d);
300
301                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
302                         snprintf(adap->msix_info[msi_idx].desc, n,
303                                  "%s-%d", d->name, pi->first_qset + i);
304                         adap->msix_info[msi_idx].desc[n] = 0;
305                 }
306         }
307 }
308
309 static int request_msix_data_irqs(struct adapter *adap)
310 {
311         int i, j, err, qidx = 0;
312
313         for_each_port(adap, i) {
314                 int nqsets = adap2pinfo(adap, i)->nqsets;
315
316                 for (j = 0; j < nqsets; ++j) {
317                         err = request_irq(adap->msix_info[qidx + 1].vec,
318                                           t3_intr_handler(adap,
319                                                           adap->sge.qs[qidx].
320                                                           rspq.polling), 0,
321                                           adap->msix_info[qidx + 1].desc,
322                                           &adap->sge.qs[qidx]);
323                         if (err) {
324                                 while (--qidx >= 0)
325                                         free_irq(adap->msix_info[qidx + 1].vec,
326                                                  &adap->sge.qs[qidx]);
327                                 return err;
328                         }
329                         qidx++;
330                 }
331         }
332         return 0;
333 }
334
335 static void free_irq_resources(struct adapter *adapter)
336 {
337         if (adapter->flags & USING_MSIX) {
338                 int i, n = 0;
339
340                 free_irq(adapter->msix_info[0].vec, adapter);
341                 for_each_port(adapter, i)
342                     n += adap2pinfo(adapter, i)->nqsets;
343
344                 for (i = 0; i < n; ++i)
345                         free_irq(adapter->msix_info[i + 1].vec,
346                                  &adapter->sge.qs[i]);
347         } else
348                 free_irq(adapter->pdev->irq, adapter);
349 }
350
351 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
352                               unsigned long n)
353 {
354         int attempts = 5;
355
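        /* Bounded wait: up to 5 checks with 10 ms sleeps in between. */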
356         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
357                 if (!--attempts)
358                         return -ETIMEDOUT;
359                 msleep(10);
360         }
361         return 0;
362 }
363
364 static int init_tp_parity(struct adapter *adap)
365 {
366         int i;
367         struct sk_buff *skb;
368         struct cpl_set_tcb_field *greq;
369         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
370
371         t3_tp_set_offload_mode(adap, 1);
372
373         for (i = 0; i < 16; i++) {
374                 struct cpl_smt_write_req *req;
375
376                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
377                 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
378                 memset(req, 0, sizeof(*req));
379                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
380                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
381                 req->iff = i;
382                 t3_mgmt_tx(adap, skb);
383         }
384
385         for (i = 0; i < 2048; i++) {
386                 struct cpl_l2t_write_req *req;
387
388                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
389                 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
390                 memset(req, 0, sizeof(*req));
391                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
392                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
393                 req->params = htonl(V_L2T_W_IDX(i));
394                 t3_mgmt_tx(adap, skb);
395         }
396
397         for (i = 0; i < 2048; i++) {
398                 struct cpl_rte_write_req *req;
399
400                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
401                 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
402                 memset(req, 0, sizeof(*req));
403                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
404                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
405                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
406                 t3_mgmt_tx(adap, skb);
407         }
408
409         skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
410         greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
411         memset(greq, 0, sizeof(*greq));
412         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
413         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
414         greq->mask = cpu_to_be64(1);
415         t3_mgmt_tx(adap, skb);
416
417         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
418         t3_tp_set_offload_mode(adap, 0);
419         return i;
420 }
421
422 /**
423  *      setup_rss - configure RSS
424  *      @adap: the adapter
425  *
426  *      Sets up RSS to distribute packets to multiple receive queues.  We
427  *      configure the RSS CPU lookup table to distribute to the number of HW
428  *      receive queues, and the response queue lookup table to narrow that
429  *      down to the response queues actually configured for each port.
430  *      We always configure the RSS mapping for two ports since the mapping
431  *      table has plenty of entries.
432  */
433 static void setup_rss(struct adapter *adap)
434 {
435         int i;
436         unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
437         unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
438         u8 cpus[SGE_QSETS + 1];
439         u16 rspq_map[RSS_TABLE_SIZE];
440
441         for (i = 0; i < SGE_QSETS; ++i)
442                 cpus[i] = i;
443         cpus[SGE_QSETS] = 0xff; /* terminator */
444
445         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
446                 rspq_map[i] = i % nq0;
447                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
448         }
449
450         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
451                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
452                       V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
453 }
454
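/*
 * Worked example for the mapping built above, assuming the hypothetical
 * case nq0 = 2 and nq1 = 2: the first half of rspq_map reads 0,1,0,1,...
 * (port 0's queues) and the second half reads 2,3,2,3,... (port 1's
 * queues, offset by nq0), so RSS hash buckets are spread evenly across
 * every configured response queue.
 */
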
455 static void init_napi(struct adapter *adap)
456 {
457         int i;
458
459         for (i = 0; i < SGE_QSETS; i++) {
460                 struct sge_qset *qs = &adap->sge.qs[i];
461
462                 if (qs->adap)
463                         netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
464                                        64);
465         }
466
467         /*
468          * netif_napi_add() can be called only once per napi_struct because it
469  * adds each new napi_struct to a list.  We note that it has run so
470  * that it is not called a second time, e.g., during EEH recovery.
471          */
472         adap->flags |= NAPI_INIT;
473 }
474
475 /*
476  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
477  * both netdevices representing interfaces and the dummy ones for the extra
478  * queues.
479  */
480 static void quiesce_rx(struct adapter *adap)
481 {
482         int i;
483
484         for (i = 0; i < SGE_QSETS; i++)
485                 if (adap->sge.qs[i].adap)
486                         napi_disable(&adap->sge.qs[i].napi);
487 }
488
489 static void enable_all_napi(struct adapter *adap)
490 {
491         int i;
492         for (i = 0; i < SGE_QSETS; i++)
493                 if (adap->sge.qs[i].adap)
494                         napi_enable(&adap->sge.qs[i].napi);
495 }
496
497 /**
498  *      set_qset_lro - Turn a queue set's LRO capability on and off
499  *      @dev: the device the qset is attached to
500  *      @qset_idx: the queue set index
501  *      @val: the LRO switch
502  *
503  *      Sets LRO on or off for a particular queue set.  The
504  *      device's features flag is updated to reflect the LRO
505  *      capability when all queues belonging to the device are
506  *      in the same state.
507  */
508 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
509 {
510         struct port_info *pi = netdev_priv(dev);
511         struct adapter *adapter = pi->adapter;
512         int i, lro_on = 1;
513
514         adapter->params.sge.qset[qset_idx].lro = !!val;
515         adapter->sge.qs[qset_idx].lro_enabled = !!val;
516
517         /* let ethtool report LRO on only if all queues are LRO enabled */
518         for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
519                 lro_on &= adapter->params.sge.qset[i].lro;
520
521         if (lro_on)
522                 dev->features |= NETIF_F_LRO;
523         else
524                 dev->features &= ~NETIF_F_LRO;
525 }
526
527 /**
528  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
529  *      @adap: the adapter
530  *
531  *      Determines how many sets of SGE queues to use and initializes them.
532  *      We support multiple queue sets per port if we have MSI-X, otherwise
533  *      just one queue set per port.
534  */
535 static int setup_sge_qsets(struct adapter *adap)
536 {
537         int i, j, err, irq_idx = 0, qset_idx = 0;
538         unsigned int ntxq = SGE_TXQ_PER_SET;
539
540         if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
541                 irq_idx = -1;
542
543         for_each_port(adap, i) {
544                 struct net_device *dev = adap->port[i];
545                 struct port_info *pi = netdev_priv(dev);
546
547                 pi->qs = &adap->sge.qs[pi->first_qset];
548                 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
549                      ++j, ++qset_idx) {
550                         set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
551                         err = t3_sge_alloc_qset(adap, qset_idx, 1,
552                                 (adap->flags & USING_MSIX) ? qset_idx + 1 :
553                                                              irq_idx,
554                                 &adap->params.sge.qset[qset_idx], ntxq, dev,
555                                 netdev_get_tx_queue(dev, j));
556                         if (err) {
557                                 t3_stop_sge_timers(adap);
558                                 t3_free_sge_resources(adap);
559                                 return err;
560                         }
561                 }
562         }
563
564         return 0;
565 }
566
567 static ssize_t attr_show(struct device *d, char *buf,
568                          ssize_t(*format) (struct net_device *, char *))
569 {
570         ssize_t len;
571
572         /* Synchronize with ioctls that may shut down the device */
573         rtnl_lock();
574         len = (*format) (to_net_dev(d), buf);
575         rtnl_unlock();
576         return len;
577 }
578
579 static ssize_t attr_store(struct device *d,
580                           const char *buf, size_t len,
581                           ssize_t(*set) (struct net_device *, unsigned int),
582                           unsigned int min_val, unsigned int max_val)
583 {
584         char *endp;
585         ssize_t ret;
586         unsigned int val;
587
588         if (!capable(CAP_NET_ADMIN))
589                 return -EPERM;
590
591         val = simple_strtoul(buf, &endp, 0);
592         if (endp == buf || val < min_val || val > max_val)
593                 return -EINVAL;
594
595         rtnl_lock();
596         ret = (*set) (to_net_dev(d), val);
597         if (!ret)
598                 ret = len;
599         rtnl_unlock();
600         return ret;
601 }
602
603 #define CXGB3_SHOW(name, val_expr) \
604 static ssize_t format_##name(struct net_device *dev, char *buf) \
605 { \
606         struct port_info *pi = netdev_priv(dev); \
607         struct adapter *adap = pi->adapter; \
608         return sprintf(buf, "%u\n", val_expr); \
609 } \
610 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
611                            char *buf) \
612 { \
613         return attr_show(d, buf, format_##name); \
614 }
615
616 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
617 {
618         struct port_info *pi = netdev_priv(dev);
619         struct adapter *adap = pi->adapter;
620         int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
621
622         if (adap->flags & FULL_INIT_DONE)
623                 return -EBUSY;
624         if (val && adap->params.rev == 0)
625                 return -EINVAL;
626         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
627             min_tids)
628                 return -EINVAL;
629         adap->params.mc5.nfilters = val;
630         return 0;
631 }
632
633 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
634                               const char *buf, size_t len)
635 {
636         return attr_store(d, buf, len, set_nfilters, 0, ~0);
637 }
638
639 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
640 {
641         struct port_info *pi = netdev_priv(dev);
642         struct adapter *adap = pi->adapter;
643
644         if (adap->flags & FULL_INIT_DONE)
645                 return -EBUSY;
646         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
647             MC5_MIN_TIDS)
648                 return -EINVAL;
649         adap->params.mc5.nservers = val;
650         return 0;
651 }
652
653 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
654                               const char *buf, size_t len)
655 {
656         return attr_store(d, buf, len, set_nservers, 0, ~0);
657 }
658
659 #define CXGB3_ATTR_R(name, val_expr) \
660 CXGB3_SHOW(name, val_expr) \
661 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
662
663 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
664 CXGB3_SHOW(name, val_expr) \
665 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
666
667 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
668 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
669 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
670
671 static struct attribute *cxgb3_attrs[] = {
672         &dev_attr_cam_size.attr,
673         &dev_attr_nfilters.attr,
674         &dev_attr_nservers.attr,
675         NULL
676 };
677
678 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
679
680 static ssize_t tm_attr_show(struct device *d,
681                             char *buf, int sched)
682 {
683         struct port_info *pi = netdev_priv(to_net_dev(d));
684         struct adapter *adap = pi->adapter;
685         unsigned int v, addr, bpt, cpt;
686         ssize_t len;
687
688         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
689         rtnl_lock();
690         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
691         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
692         if (sched & 1)
693                 v >>= 16;
694         bpt = (v >> 8) & 0xff;
695         cpt = v & 0xff;
696         if (!cpt)
697                 len = sprintf(buf, "disabled\n");
698         else {
699                 v = (adap->params.vpd.cclk * 1000) / cpt;
700                 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
701         }
702         rtnl_unlock();
703         return len;
704 }
705
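/*
 * Worked example for the rate computation above, with hypothetical values
 * and cclk assumed to be in kHz: cclk = 200000, cpt = 100 and bpt = 64
 * give (200000 * 1000) / 100 = 2,000,000 scheduler ticks/s, hence
 * 2,000,000 * 64 = 128,000,000 bytes/s, and dividing by 125 converts
 * bytes/s to Kbps: 1,024,000 Kbps.
 */
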
706 static ssize_t tm_attr_store(struct device *d,
707                              const char *buf, size_t len, int sched)
708 {
709         struct port_info *pi = netdev_priv(to_net_dev(d));
710         struct adapter *adap = pi->adapter;
711         unsigned int val;
712         char *endp;
713         ssize_t ret;
714
715         if (!capable(CAP_NET_ADMIN))
716                 return -EPERM;
717
718         val = simple_strtoul(buf, &endp, 0);
719         if (endp == buf || val > 10000000)
720                 return -EINVAL;
721
722         rtnl_lock();
723         ret = t3_config_sched(adap, val, sched);
724         if (!ret)
725                 ret = len;
726         rtnl_unlock();
727         return ret;
728 }
729
730 #define TM_ATTR(name, sched) \
731 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
732                            char *buf) \
733 { \
734         return tm_attr_show(d, buf, sched); \
735 } \
736 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
737                             const char *buf, size_t len) \
738 { \
739         return tm_attr_store(d, buf, len, sched); \
740 } \
741 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
742
743 TM_ATTR(sched0, 0);
744 TM_ATTR(sched1, 1);
745 TM_ATTR(sched2, 2);
746 TM_ATTR(sched3, 3);
747 TM_ATTR(sched4, 4);
748 TM_ATTR(sched5, 5);
749 TM_ATTR(sched6, 6);
750 TM_ATTR(sched7, 7);
751
752 static struct attribute *offload_attrs[] = {
753         &dev_attr_sched0.attr,
754         &dev_attr_sched1.attr,
755         &dev_attr_sched2.attr,
756         &dev_attr_sched3.attr,
757         &dev_attr_sched4.attr,
758         &dev_attr_sched5.attr,
759         &dev_attr_sched6.attr,
760         &dev_attr_sched7.attr,
761         NULL
762 };
763
764 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
765
766 /*
767  * Sends an sk_buff to an offload queue driver
768  * after dealing with any active network taps.
769  */
770 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
771 {
772         int ret;
773
774         local_bh_disable();
775         ret = t3_offload_tx(tdev, skb);
776         local_bh_enable();
777         return ret;
778 }
779
780 static int write_smt_entry(struct adapter *adapter, int idx)
781 {
782         struct cpl_smt_write_req *req;
783         struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
784
785         if (!skb)
786                 return -ENOMEM;
787
788         req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
789         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
790         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
791         req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
792         req->iff = idx;
793         memset(req->src_mac1, 0, sizeof(req->src_mac1));
794         memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
795         skb->priority = 1;
796         offload_tx(&adapter->tdev, skb);
797         return 0;
798 }
799
800 static int init_smt(struct adapter *adapter)
801 {
802         int i;
803
804         for_each_port(adapter, i)
805             write_smt_entry(adapter, i);
806         return 0;
807 }
808
809 static void init_port_mtus(struct adapter *adapter)
810 {
811         unsigned int mtus = adapter->port[0]->mtu;
812
813         if (adapter->port[1])
814                 mtus |= adapter->port[1]->mtu << 16;
815         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
816 }
817
818 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
819                               int hi, int port)
820 {
821         struct sk_buff *skb;
822         struct mngt_pktsched_wr *req;
823         int ret;
824
825         skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
826         req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
827         req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
828         req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
829         req->sched = sched;
830         req->idx = qidx;
831         req->min = lo;
832         req->max = hi;
833         req->binding = port;
834         ret = t3_mgmt_tx(adap, skb);
835
836         return ret;
837 }
838
839 static int bind_qsets(struct adapter *adap)
840 {
841         int i, j, err = 0;
842
843         for_each_port(adap, i) {
844                 const struct port_info *pi = adap2pinfo(adap, i);
845
846                 for (j = 0; j < pi->nqsets; ++j) {
847                         int ret = send_pktsched_cmd(adap, 1,
848                                                     pi->first_qset + j, -1,
849                                                     -1, i);
850                         if (ret)
851                                 err = ret;
852                 }
853         }
854
855         return err;
856 }
857
858 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
859 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
860
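/*
 * For illustration, with a hypothetical firmware 7.0.0 on a rev-B part
 * these templates expand to "cxgb3/t3fw-7.0.0.bin" and
 * "cxgb3/t3b_psram-7.0.0.bin"; request_firmware() then looks the files up
 * in the system firmware directory.
 */
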
861 static int upgrade_fw(struct adapter *adap)
862 {
863         int ret;
864         char buf[64];
865         const struct firmware *fw;
866         struct device *dev = &adap->pdev->dev;
867
868         snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
869                  FW_VERSION_MINOR, FW_VERSION_MICRO);
870         ret = request_firmware(&fw, buf, dev);
871         if (ret < 0) {
872                 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
873                         buf);
874                 return ret;
875         }
876         ret = t3_load_fw(adap, fw->data, fw->size);
877         release_firmware(fw);
878
879         if (ret == 0)
880                 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
881                          FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
882         else
883                 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
884                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
885
886         return ret;
887 }
888
889 static inline char t3rev2char(struct adapter *adapter)
890 {
891         char rev = 0;
892
893         switch(adapter->params.rev) {
894         case T3_REV_B:
895         case T3_REV_B2:
896                 rev = 'b';
897                 break;
898         case T3_REV_C:
899                 rev = 'c';
900                 break;
901         }
902         return rev;
903 }
904
905 static int update_tpsram(struct adapter *adap)
906 {
907         const struct firmware *tpsram;
908         char buf[64];
909         struct device *dev = &adap->pdev->dev;
910         int ret;
911         char rev;
912
913         rev = t3rev2char(adap);
914         if (!rev)
915                 return 0;
916
917         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
918                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
919
920         ret = request_firmware(&tpsram, buf, dev);
921         if (ret < 0) {
922                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
923                         buf);
924                 return ret;
925         }
926
927         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
928         if (ret)
929                 goto release_tpsram;
930
931         ret = t3_set_proto_sram(adap, tpsram->data);
932         if (ret == 0)
933                 dev_info(dev,
934                          "successful update of protocol engine "
935                          "to %d.%d.%d\n",
936                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
937         else
938                 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
939                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
940         if (ret)
941                 dev_err(dev, "loading protocol SRAM failed\n");
942
943 release_tpsram:
944         release_firmware(tpsram);
945
946         return ret;
947 }
948
949 /**
950  *      cxgb_up - enable the adapter
951  *      @adapter: adapter being enabled
952  *
953  *      Called when the first port is enabled, this function performs the
954  *      actions necessary to make an adapter operational, such as completing
955  *      the initialization of HW modules, and enabling interrupts.
956  *
957  *      Must be called with the rtnl lock held.
958  */
959 static int cxgb_up(struct adapter *adap)
960 {
961         int err;
962
963         if (!(adap->flags & FULL_INIT_DONE)) {
964                 err = t3_check_fw_version(adap);
965                 if (err == -EINVAL) {
966                         err = upgrade_fw(adap);
967                         CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
968                                 FW_VERSION_MAJOR, FW_VERSION_MINOR,
969                                 FW_VERSION_MICRO, err ? "failed" : "succeeded");
970                 }
971
972                 err = t3_check_tpsram_version(adap);
973                 if (err == -EINVAL) {
974                         err = update_tpsram(adap);
975                         CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
976                                 TP_VERSION_MAJOR, TP_VERSION_MINOR,
977                                 TP_VERSION_MICRO, err ? "failed" : "succeeded");
978                 }
979
980                 /*
981                  * Clear interrupts now to catch errors if t3_init_hw fails.
982                  * We clear them again later as initialization may trigger
983                  * conditions that can interrupt.
984                  */
985                 t3_intr_clear(adap);
986
987                 err = t3_init_hw(adap, 0);
988                 if (err)
989                         goto out;
990
991                 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
992                 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
993
994                 err = setup_sge_qsets(adap);
995                 if (err)
996                         goto out;
997
998                 setup_rss(adap);
999                 if (!(adap->flags & NAPI_INIT))
1000                         init_napi(adap);
1001                 adap->flags |= FULL_INIT_DONE;
1002         }
1003
1004         t3_intr_clear(adap);
1005
1006         if (adap->flags & USING_MSIX) {
1007                 name_msix_vecs(adap);
1008                 err = request_irq(adap->msix_info[0].vec,
1009                                   t3_async_intr_handler, 0,
1010                                   adap->msix_info[0].desc, adap);
1011                 if (err)
1012                         goto irq_err;
1013
1014                 err = request_msix_data_irqs(adap);
1015                 if (err) {
1016                         free_irq(adap->msix_info[0].vec, adap);
1017                         goto irq_err;
1018                 }
1019         } else if ((err = request_irq(adap->pdev->irq,
1020                                       t3_intr_handler(adap,
1021                                                       adap->sge.qs[0].rspq.
1022                                                       polling),
1023                                       (adap->flags & USING_MSI) ?
1024                                        0 : IRQF_SHARED,
1025                                       adap->name, adap)))
1026                 goto irq_err;
1027
1028         enable_all_napi(adap);
1029         t3_sge_start(adap);
1030         t3_intr_enable(adap);
1031
1032         if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1033             is_offload(adap) && init_tp_parity(adap) == 0)
1034                 adap->flags |= TP_PARITY_INIT;
1035
1036         if (adap->flags & TP_PARITY_INIT) {
1037                 t3_write_reg(adap, A_TP_INT_CAUSE,
1038                              F_CMCACHEPERR | F_ARPLUTPERR);
1039                 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1040         }
1041
1042         if (!(adap->flags & QUEUES_BOUND)) {
1043                 err = bind_qsets(adap);
1044                 if (err) {
1045                         CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1046                         t3_intr_disable(adap);
1047                         free_irq_resources(adap);
1048                         goto out;
1049                 }
1050                 adap->flags |= QUEUES_BOUND;
1051         }
1052
1053 out:
1054         return err;
1055 irq_err:
1056         CH_ERR(adap, "request_irq failed, err %d\n", err);
1057         goto out;
1058 }
1059
1060 /*
1061  * Release resources when all the ports and offloading have been stopped.
1062  */
1063 static void cxgb_down(struct adapter *adapter)
1064 {
1065         t3_sge_stop(adapter);
1066         spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
1067         t3_intr_disable(adapter);
1068         spin_unlock_irq(&adapter->work_lock);
1069
1070         free_irq_resources(adapter);
1071         flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
1072         quiesce_rx(adapter);
1073 }
1074
1075 static void schedule_chk_task(struct adapter *adap)
1076 {
1077         unsigned int timeo;
1078
1079         timeo = adap->params.linkpoll_period ?
1080             (HZ * adap->params.linkpoll_period) / 10 :
1081             adap->params.stats_update_period * HZ;
1082         if (timeo)
1083                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1084 }
1085
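/*
 * Example of the conversion above (units inferred from the arithmetic):
 * linkpoll_period appears to be in tenths of a second, so a value of 5
 * with HZ = 250 gives (250 * 5) / 10 = 125 jiffies, i.e. polling every
 * 0.5 s; stats_update_period is in whole seconds.
 */
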
1086 static int offload_open(struct net_device *dev)
1087 {
1088         struct port_info *pi = netdev_priv(dev);
1089         struct adapter *adapter = pi->adapter;
1090         struct t3cdev *tdev = dev2t3cdev(dev);
1091         int adap_up = adapter->open_device_map & PORT_MASK;
1092         int err;
1093
1094         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1095                 return 0;
1096
1097         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1098                 goto out;
1099
1100         t3_tp_set_offload_mode(adapter, 1);
1101         tdev->lldev = adapter->port[0];
1102         err = cxgb3_offload_activate(adapter);
1103         if (err)
1104                 goto out;
1105
1106         init_port_mtus(adapter);
1107         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1108                      adapter->params.b_wnd,
1109                      adapter->params.rev == 0 ?
1110                      adapter->port[0]->mtu : 0xffff);
1111         init_smt(adapter);
1112
1113         if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1114                 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1115
1116         /* Call back all registered clients */
1117         cxgb3_add_clients(tdev);
1118
1119 out:
1120         /* restore them in case the offload module has changed them */
1121         if (err) {
1122                 t3_tp_set_offload_mode(adapter, 0);
1123                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1124                 cxgb3_set_dummy_ops(tdev);
1125         }
1126         return err;
1127 }
1128
1129 static int offload_close(struct t3cdev *tdev)
1130 {
1131         struct adapter *adapter = tdev2adap(tdev);
1132
1133         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1134                 return 0;
1135
1136         /* Call back all registered clients */
1137         cxgb3_remove_clients(tdev);
1138
1139         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1140
1141         tdev->lldev = NULL;
1142         cxgb3_set_dummy_ops(tdev);
1143         t3_tp_set_offload_mode(adapter, 0);
1144         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1145
1146         if (!adapter->open_device_map)
1147                 cxgb_down(adapter);
1148
1149         cxgb3_offload_deactivate(adapter);
1150         return 0;
1151 }
1152
1153 static int cxgb_open(struct net_device *dev)
1154 {
1155         struct port_info *pi = netdev_priv(dev);
1156         struct adapter *adapter = pi->adapter;
1157         int other_ports = adapter->open_device_map & PORT_MASK;
1158         int err;
1159
1160         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1161                 return err;
1162
1163         set_bit(pi->port_id, &adapter->open_device_map);
1164         if (is_offload(adapter) && !ofld_disable) {
1165                 err = offload_open(dev);
1166                 if (err)
1167                         printk(KERN_WARNING
1168                                "Could not initialize offload capabilities\n");
1169         }
1170
1171         dev->real_num_tx_queues = pi->nqsets;
1172         link_start(dev);
1173         t3_port_intr_enable(adapter, pi->port_id);
1174         netif_tx_start_all_queues(dev);
1175         if (!other_ports)
1176                 schedule_chk_task(adapter);
1177
1178         return 0;
1179 }
1180
1181 static int cxgb_close(struct net_device *dev)
1182 {
1183         struct port_info *pi = netdev_priv(dev);
1184         struct adapter *adapter = pi->adapter;
1185
1186         t3_port_intr_disable(adapter, pi->port_id);
1187         netif_tx_stop_all_queues(dev);
1188         pi->phy.ops->power_down(&pi->phy, 1);
1189         netif_carrier_off(dev);
1190         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1191
1192         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1193         clear_bit(pi->port_id, &adapter->open_device_map);
1194         spin_unlock_irq(&adapter->work_lock);
1195
1196         if (!(adapter->open_device_map & PORT_MASK))
1197                 cancel_rearming_delayed_workqueue(cxgb3_wq,
1198                                                   &adapter->adap_check_task);
1199
1200         if (!adapter->open_device_map)
1201                 cxgb_down(adapter);
1202
1203         return 0;
1204 }
1205
1206 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1207 {
1208         struct port_info *pi = netdev_priv(dev);
1209         struct adapter *adapter = pi->adapter;
1210         struct net_device_stats *ns = &pi->netstats;
1211         const struct mac_stats *pstats;
1212
1213         spin_lock(&adapter->stats_lock);
1214         pstats = t3_mac_update_stats(&pi->mac);
1215         spin_unlock(&adapter->stats_lock);
1216
1217         ns->tx_bytes = pstats->tx_octets;
1218         ns->tx_packets = pstats->tx_frames;
1219         ns->rx_bytes = pstats->rx_octets;
1220         ns->rx_packets = pstats->rx_frames;
1221         ns->multicast = pstats->rx_mcast_frames;
1222
1223         ns->tx_errors = pstats->tx_underrun;
1224         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1225             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1226             pstats->rx_fifo_ovfl;
1227
1228         /* detailed rx_errors */
1229         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1230         ns->rx_over_errors = 0;
1231         ns->rx_crc_errors = pstats->rx_fcs_errs;
1232         ns->rx_frame_errors = pstats->rx_symbol_errs;
1233         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1234         ns->rx_missed_errors = pstats->rx_cong_drops;
1235
1236         /* detailed tx_errors */
1237         ns->tx_aborted_errors = 0;
1238         ns->tx_carrier_errors = 0;
1239         ns->tx_fifo_errors = pstats->tx_underrun;
1240         ns->tx_heartbeat_errors = 0;
1241         ns->tx_window_errors = 0;
1242         return ns;
1243 }
1244
1245 static u32 get_msglevel(struct net_device *dev)
1246 {
1247         struct port_info *pi = netdev_priv(dev);
1248         struct adapter *adapter = pi->adapter;
1249
1250         return adapter->msg_enable;
1251 }
1252
1253 static void set_msglevel(struct net_device *dev, u32 val)
1254 {
1255         struct port_info *pi = netdev_priv(dev);
1256         struct adapter *adapter = pi->adapter;
1257
1258         adapter->msg_enable = val;
1259 }
1260
1261 static char stats_strings[][ETH_GSTRING_LEN] = {
1262         "TxOctetsOK         ",
1263         "TxFramesOK         ",
1264         "TxMulticastFramesOK",
1265         "TxBroadcastFramesOK",
1266         "TxPauseFrames      ",
1267         "TxUnderrun         ",
1268         "TxExtUnderrun      ",
1269
1270         "TxFrames64         ",
1271         "TxFrames65To127    ",
1272         "TxFrames128To255   ",
1273         "TxFrames256To511   ",
1274         "TxFrames512To1023  ",
1275         "TxFrames1024To1518 ",
1276         "TxFrames1519ToMax  ",
1277
1278         "RxOctetsOK         ",
1279         "RxFramesOK         ",
1280         "RxMulticastFramesOK",
1281         "RxBroadcastFramesOK",
1282         "RxPauseFrames      ",
1283         "RxFCSErrors        ",
1284         "RxSymbolErrors     ",
1285         "RxShortErrors      ",
1286         "RxJabberErrors     ",
1287         "RxLengthErrors     ",
1288         "RxFIFOoverflow     ",
1289
1290         "RxFrames64         ",
1291         "RxFrames65To127    ",
1292         "RxFrames128To255   ",
1293         "RxFrames256To511   ",
1294         "RxFrames512To1023  ",
1295         "RxFrames1024To1518 ",
1296         "RxFrames1519ToMax  ",
1297
1298         "PhyFIFOErrors      ",
1299         "TSO                ",
1300         "VLANextractions    ",
1301         "VLANinsertions     ",
1302         "TxCsumOffload      ",
1303         "RxCsumGood         ",
1304         "LroAggregated      ",
1305         "LroFlushed         ",
1306         "LroNoDesc          ",
1307         "RxDrops            ",
1308
1309         "CheckTXEnToggled   ",
1310         "CheckResets        ",
1311
1312 };
1313
1314 static int get_sset_count(struct net_device *dev, int sset)
1315 {
1316         switch (sset) {
1317         case ETH_SS_STATS:
1318                 return ARRAY_SIZE(stats_strings);
1319         default:
1320                 return -EOPNOTSUPP;
1321         }
1322 }
1323
1324 #define T3_REGMAP_SIZE (3 * 1024)
1325
1326 static int get_regs_len(struct net_device *dev)
1327 {
1328         return T3_REGMAP_SIZE;
1329 }
1330
1331 static int get_eeprom_len(struct net_device *dev)
1332 {
1333         return EEPROMSIZE;
1334 }
1335
1336 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1337 {
1338         struct port_info *pi = netdev_priv(dev);
1339         struct adapter *adapter = pi->adapter;
1340         u32 fw_vers = 0;
1341         u32 tp_vers = 0;
1342
1343         spin_lock(&adapter->stats_lock);
1344         t3_get_fw_version(adapter, &fw_vers);
1345         t3_get_tp_version(adapter, &tp_vers);
1346         spin_unlock(&adapter->stats_lock);
1347
1348         strcpy(info->driver, DRV_NAME);
1349         strcpy(info->version, DRV_VERSION);
1350         strcpy(info->bus_info, pci_name(adapter->pdev));
1351         if (!fw_vers)
1352                 strcpy(info->fw_version, "N/A");
1353         else {
1354                 snprintf(info->fw_version, sizeof(info->fw_version),
1355                          "%s %u.%u.%u TP %u.%u.%u",
1356                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1357                          G_FW_VERSION_MAJOR(fw_vers),
1358                          G_FW_VERSION_MINOR(fw_vers),
1359                          G_FW_VERSION_MICRO(fw_vers),
1360                          G_TP_VERSION_MAJOR(tp_vers),
1361                          G_TP_VERSION_MINOR(tp_vers),
1362                          G_TP_VERSION_MICRO(tp_vers));
1363         }
1364 }
1365
1366 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1367 {
1368         if (stringset == ETH_SS_STATS)
1369                 memcpy(data, stats_strings, sizeof(stats_strings));
1370 }
1371
1372 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1373                                             struct port_info *p, int idx)
1374 {
1375         int i;
1376         unsigned long tot = 0;
1377
1378         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1379                 tot += adapter->sge.qs[i].port_stats[idx];
1380         return tot;
1381 }
1382
1383 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1384                       u64 *data)
1385 {
1386         struct port_info *pi = netdev_priv(dev);
1387         struct adapter *adapter = pi->adapter;
1388         const struct mac_stats *s;
1389
1390         spin_lock(&adapter->stats_lock);
1391         s = t3_mac_update_stats(&pi->mac);
1392         spin_unlock(&adapter->stats_lock);
1393
1394         *data++ = s->tx_octets;
1395         *data++ = s->tx_frames;
1396         *data++ = s->tx_mcast_frames;
1397         *data++ = s->tx_bcast_frames;
1398         *data++ = s->tx_pause;
1399         *data++ = s->tx_underrun;
1400         *data++ = s->tx_fifo_urun;
1401
1402         *data++ = s->tx_frames_64;
1403         *data++ = s->tx_frames_65_127;
1404         *data++ = s->tx_frames_128_255;
1405         *data++ = s->tx_frames_256_511;
1406         *data++ = s->tx_frames_512_1023;
1407         *data++ = s->tx_frames_1024_1518;
1408         *data++ = s->tx_frames_1519_max;
1409
1410         *data++ = s->rx_octets;
1411         *data++ = s->rx_frames;
1412         *data++ = s->rx_mcast_frames;
1413         *data++ = s->rx_bcast_frames;
1414         *data++ = s->rx_pause;
1415         *data++ = s->rx_fcs_errs;
1416         *data++ = s->rx_symbol_errs;
1417         *data++ = s->rx_short;
1418         *data++ = s->rx_jabber;
1419         *data++ = s->rx_too_long;
1420         *data++ = s->rx_fifo_ovfl;
1421
1422         *data++ = s->rx_frames_64;
1423         *data++ = s->rx_frames_65_127;
1424         *data++ = s->rx_frames_128_255;
1425         *data++ = s->rx_frames_256_511;
1426         *data++ = s->rx_frames_512_1023;
1427         *data++ = s->rx_frames_1024_1518;
1428         *data++ = s->rx_frames_1519_max;
1429
1430         *data++ = pi->phy.fifo_errors;
1431
1432         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1433         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1434         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1435         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1436         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1437         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1438         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1439         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1440         *data++ = s->rx_cong_drops;
1441
1442         *data++ = s->num_toggled;
1443         *data++ = s->num_resets;
1444 }
1445
1446 static inline void reg_block_dump(struct adapter *ap, void *buf,
1447                                   unsigned int start, unsigned int end)
1448 {
1449         u32 *p = buf + start;
1450
1451         for (; start <= end; start += sizeof(u32))
1452                 *p++ = t3_read_reg(ap, start);
1453 }
1454
1455 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1456                      void *buf)
1457 {
1458         struct port_info *pi = netdev_priv(dev);
1459         struct adapter *ap = pi->adapter;
1460
1461         /*
1462          * Version scheme:
1463          * bits 0..9: chip version
1464          * bits 10..15: chip revision
1465          * bit 31: set for PCIe cards
1466          */
1467         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1468
1469         /*
1470          * We skip the MAC statistics registers because they are clear-on-read.
1471          * Also reading multi-register stats would need to synchronize with the
1472          * periodic mac stats accumulation.  Hard to justify the complexity.
1473          */
1474         memset(buf, 0, T3_REGMAP_SIZE);
1475         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1476         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1477         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1478         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1479         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1480         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1481                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1482         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1483                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1484 }
1485
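/*
 * A small illustrative helper (not part of the driver) decoding the
 * version word composed in get_regs() above:
 */
static inline void example_decode_regs_version(u32 version)
{
        unsigned int chip = version & 0x3ff;            /* bits 0..9 */
        unsigned int rev = (version >> 10) & 0x3f;      /* bits 10..15 */

        printk(KERN_DEBUG "chip version %u, revision %u, %s\n", chip, rev,
               (version & (1u << 31)) ? "PCIe" : "not PCIe");
}
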
1486 static int restart_autoneg(struct net_device *dev)
1487 {
1488         struct port_info *p = netdev_priv(dev);
1489
1490         if (!netif_running(dev))
1491                 return -EAGAIN;
1492         if (p->link_config.autoneg != AUTONEG_ENABLE)
1493                 return -EINVAL;
1494         p->phy.ops->autoneg_restart(&p->phy);
1495         return 0;
1496 }
1497
1498 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1499 {
1500         struct port_info *pi = netdev_priv(dev);
1501         struct adapter *adapter = pi->adapter;
1502         int i;
1503
1504         if (data == 0)
1505                 data = 2;
1506
1507         for (i = 0; i < data * 2; i++) {
1508                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1509                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1510                 if (msleep_interruptible(500))
1511                         break;
1512         }
1513         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1514                          F_GPIO0_OUT_VAL);
1515         return 0;
1516 }
1517
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

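/*
 * Translate a speed/duplex pair into the corresponding SUPPORTED_*
 * capability bit, or 0 if the combination is not expressible.
 */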
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

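/*
 * ethtool set_settings handler.  With autoneg disabled the requested
 * speed/duplex must be a supported fixed mode (1Gb/s can only be
 * autonegotiated); with autoneg enabled the advertising mask is first
 * clipped to the modes the PHY actually supports.
 */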
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg)) {
                /*
                 * PHY offers a single speed/duplex.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE) {
                        cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
                        if (lc->supported & cap)
                                return 0;
                }
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

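/*
 * ethtool set_pauseparam handler.  Pause autonegotiation is only
 * permitted on PHYs that support autoneg; otherwise the requested
 * Rx/Tx pause settings are applied to the MAC directly.
 */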
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_offload & T3_RX_CSUM;
}

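/*
 * Disabling Rx checksum offload also turns LRO off on all of the port's
 * queue sets, since LRO depends on validated hardware checksums.
 */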
static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        if (data) {
                p->rx_offload |= T3_RX_CSUM;
        } else {
                int i;

                p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
                for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
                        set_qset_lro(dev, i, 0);
        }
        return 0;
}

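/*
 * ethtool ring parameters are reported from the port's first queue set;
 * the "mini" Rx ring slot is used to report the response queue size.
 */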
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}

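/*
 * Validate and apply new ring sizes to every queue set of the port.
 * Ring sizes can only be changed while the adapter is quiesced, i.e.
 * before the first port has been brought up.
 */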
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q;
        int i;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

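/*
 * ethtool Rx coalescing: only queue set 0 is updated here.  The value
 * is range-checked against M_NEWTIMER after scaling by 10 (the hardware
 * holdoff timer appears to count in 100ns ticks).
 */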
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

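/*
 * Read the serial EEPROM through a bounce buffer 4 bytes at a time and
 * copy out the requested window; device reads must be 32-bit aligned.
 */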
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i, err = 0;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

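/*
 * Write a byte range to the serial EEPROM.  Unaligned writes are done
 * read-modify-write through a bounce buffer, and write protection is
 * dropped around the update and re-enabled afterwards.
 */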
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 aligned_offset, aligned_len;
        __le32 *p;
        u8 *buf;
        int err;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (__le32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else {
                buf = data;
        }

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

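/* Wake-on-LAN is not supported by this driver. */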
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

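/*
 * ethtool set_flags handler: toggle LRO on all of the port's queue
 * sets.  LRO may only be enabled while Rx checksum offload is on.
 */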
static int cxgb3_set_flags(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        int i;

        if (data & ETH_FLAG_LRO) {
                if (!(pi->rx_offload & T3_RX_CSUM))
                        return -EINVAL;

                pi->rx_offload |= T3_LRO;
                for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
                        set_qset_lro(dev, i, 1);

        } else {
                pi->rx_offload &= ~T3_LRO;
                for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
                        set_qset_lro(dev, i, 0);
        }

        return 0;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .set_tso = ethtool_op_set_tso,
        .get_flags = ethtool_op_get_flags,
        .set_flags = cxgb3_set_flags,
};

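/*
 * Range-check helper for ioctl parameters: negative values mean
 * "leave unchanged" and always pass the check.
 */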
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}

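/* Handler for the driver-private extension ioctls (SIOCCHIOCTL). */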
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 cmd;
        int ret;

        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;

        switch (cmd) {
        case CHELSIO_SET_QSET_PARAMS:{
                int i;
                struct qset_params *q;
                struct ch_qset_params t;
                int q1 = pi->first_qset;
                int nqsets = pi->nqsets;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;

                if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                if (t.qset_idx >= pi->first_qset &&
                                    t.qset_idx < pi->first_qset + pi->nqsets &&
                                    !(pi->rx_offload & T3_RX_CSUM))
                                        return -EINVAL;
                        }

                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;

                /* Allow setting of any available qset when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                nqsets += pi->first_qset + pi->nqsets;
                        }
                }

                if (t.qset_idx < q1)
                        return -EINVAL;
                if (t.qset_idx > q1 + nqsets - 1)
                        return -EINVAL;

                q = &adapter->params.sge.qset[t.qset_idx];

                if (t.rspq_size >= 0)
                        q->rspq_size = t.rspq_size;
                if (t.fl_size[0] >= 0)
                        q->fl_size = t.fl_size[0];
                if (t.fl_size[1] >= 0)
                        q->jumbo_size = t.fl_size[1];
                if (t.txq_size[0] >= 0)
                        q->txq_size[0] = t.txq_size[0];
                if (t.txq_size[1] >= 0)
                        q->txq_size[1] = t.txq_size[1];
                if (t.txq_size[2] >= 0)
                        q->txq_size[2] = t.txq_size[2];
                if (t.cong_thres >= 0)
                        q->cong_thres = t.cong_thres;
                if (t.intr_lat >= 0) {
                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

                        q->coalesce_usecs = t.intr_lat;
                        t3_update_qset_coalesce(qs, q);
                }
                if (t.polling >= 0) {
                        if (adapter->flags & USING_MSIX)
                                q->polling = t.polling;
                        else {
                                /* No polling with INTx for T3A */
                                if (adapter->params.rev == 0 &&
                                    !(adapter->flags & USING_MSI))
                                        t.polling = 0;

                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
                                        q->polling = t.polling;
                                }
                        }
                }
                if (t.lro >= 0)
                        set_qset_lro(dev, t.qset_idx, t.lro);

                break;
        }
        case CHELSIO_GET_QSET_PARAMS:{
                struct qset_params *q;
                struct ch_qset_params t;
                int q1 = pi->first_qset;
                int nqsets = pi->nqsets;
                int i;

                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;

                /* Display qsets for all ports when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                nqsets = pi->first_qset + pi->nqsets;
                        }
                }

                if (t.qset_idx >= nqsets)
                        return -EINVAL;

                q = &adapter->params.sge.qset[q1 + t.qset_idx];
                t.rspq_size = q->rspq_size;
                t.txq_size[0] = q->txq_size[0];
                t.txq_size[1] = q->txq_size[1];
                t.txq_size[2] = q->txq_size[2];
                t.fl_size[0] = q->fl_size;
                t.fl_size[1] = q->jumbo_size;
                t.polling = q->polling;
                t.lro = q->lro;
                t.intr_lat = q->coalesce_usecs;
                t.cong_thres = q->cong_thres;
                t.qnum = q1;

                if (adapter->flags & USING_MSIX)
                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
                else
                        t.vector = adapter->pdev->irq;

                if (copy_to_user(useraddr, &t, sizeof(t)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_SET_QSET_NUM:{
                struct ch_reg edata;
                unsigned int i, first_qset = 0, other_qsets = 0;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (adapter->flags & FULL_INIT_DONE)
                        return -EBUSY;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
                if (edata.val < 1 ||
                    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;

                for_each_port(adapter, i)
                        if (adapter->port[i] && adapter->port[i] != dev)
                                other_qsets += adap2pinfo(adapter, i)->nqsets;

                if (edata.val + other_qsets > SGE_QSETS)
                        return -EINVAL;

                pi->nqsets = edata.val;

                for_each_port(adapter, i)
                        if (adapter->port[i]) {
                                pi = adap2pinfo(adapter, i);
                                pi->first_qset = first_qset;
                                first_qset += pi->nqsets;
                        }
                break;
        }
        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;

                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_LOAD_FW:{
                u8 *fw_data;
                struct ch_mem_range t;

                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                /* Check t.len sanity ? */
                fw_data = kmalloc(t.len, GFP_KERNEL);
                if (!fw_data)
                        return -ENOMEM;

                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
                        kfree(fw_data);
                        return -EFAULT;
                }

                ret = t3_load_fw(adapter, fw_data, t.len);
                kfree(fw_data);
                if (ret)
                        return ret;
                break;
        }
        case CHELSIO_SETMTUTAB:{
                struct ch_mtus m;
                int i;

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (offload_running(adapter))
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
                if (m.nmtus != NMTUS)
                        return -EINVAL;
                if (m.mtus[0] < 81)     /* accommodate SACK */
                        return -EINVAL;

                /* MTUs must be in ascending order */
                for (i = 1; i < NMTUS; ++i)
                        if (m.mtus[i] < m.mtus[i - 1])
                                return -EINVAL;

                memcpy(adapter->params.mtus, m.mtus,
                       sizeof(adapter->params.mtus));
                break;
        }
        case CHELSIO_GET_PM:{
                struct tp_params *p = &adapter->params.tp;
                struct ch_pm m = {.cmd = CHELSIO_GET_PM };

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                m.tx_pg_sz = p->tx_pg_size;
                m.tx_num_pg = p->tx_num_pgs;
                m.rx_pg_sz = p->rx_pg_size;
                m.rx_num_pg = p->rx_num_pgs;
                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
                if (copy_to_user(useraddr, &m, sizeof(m)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_SET_PM:{
                struct ch_pm m;
                struct tp_params *p = &adapter->params.tp;

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (adapter->flags & FULL_INIT_DONE)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
                if (!is_power_of_2(m.rx_pg_sz) ||
                    !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* not power of 2 */
                if (!(m.rx_pg_sz & 0x14000))
                        return -EINVAL; /* not 16KB or 64KB */
                /* Tx page size must be 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */
                if (!(m.tx_pg_sz & 0x1554000))
                        return -EINVAL;
                if (m.tx_num_pg == -1)
                        m.tx_num_pg = p->tx_num_pgs;
                if (m.rx_num_pg == -1)
                        m.rx_num_pg = p->rx_num_pgs;
                /* page counts must be multiples of 24 */
                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
                        return -EINVAL;
                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
                    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
                        return -EINVAL;
                p->rx_pg_size = m.rx_pg_sz;
                p->tx_pg_size = m.tx_pg_sz;
                p->rx_num_pgs = m.rx_num_pg;
                p->tx_num_pgs = m.tx_num_pg;
                break;
        }
        case CHELSIO_GET_MEM:{
                struct ch_mem_range t;
                struct mc7 *mem;
                u64 buf[32];

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!(adapter->flags & FULL_INIT_DONE))
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if ((t.addr & 7) || (t.len & 7))
                        return -EINVAL;
                if (t.mem_id == MEM_CM)
                        mem = &adapter->cm;
                else if (t.mem_id == MEM_PMRX)
                        mem = &adapter->pmrx;
                else if (t.mem_id == MEM_PMTX)
                        mem = &adapter->pmtx;
                else
                        return -EINVAL;

                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 */
                t.version = 3 | (adapter->params.rev << 10);
                if (copy_to_user(useraddr, &t, sizeof(t)))
                        return -EFAULT;

                /*
                 * Read 256 bytes at a time as len can be large and we don't
                 * want to use huge intermediate buffers.
                 */
                useraddr += sizeof(t);  /* advance to start of buffer */
                while (t.len) {
                        unsigned int chunk =
                                min_t(unsigned int, t.len, sizeof(buf));

                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
                        if (ret)
                                return ret;
                        if (copy_to_user(useraddr, buf, chunk))
                                return -EFAULT;
                        useraddr += chunk;
                        t.addr += chunk;
                        t.len -= chunk;
                }
                break;
        }
        case CHELSIO_SET_TRACE_FILTER:{
                struct ch_trace t;
                const struct trace_params *tp;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!offload_running(adapter))
                        return -EAGAIN;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;

                tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
                        t3_config_trace_filter(adapter, tp, 0,
                                               t.invert_match,
                                               t.trace_tx);
                if (t.config_rx)
                        t3_config_trace_filter(adapter, tp, 1,
                                               t.invert_match,
                                               t.trace_rx);
                break;
        }
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

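/*
 * MII ioctl handler.  10G PHYs are addressed clause-45 style, with an
 * MMD encoded in the upper bits of the PHY id; 1G PHYs use plain
 * clause-22 register addressing.
 */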
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct mii_ioctl_data *data = if_mii(req);
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret, mmd;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = pi->phy.addr;
                /* FALLTHRU */
        case SIOCGMIIREG:{
                u32 val;
                struct cphy *phy = &pi->phy;

                if (!phy->mdio_read)
                        return -EOPNOTSUPP;
                if (is_10G(adapter)) {
                        mmd = data->phy_id >> 8;
                        if (!mmd)
                                mmd = MDIO_DEV_PCS;
                        else if (mmd > MDIO_DEV_VEND2)
                                return -EINVAL;

                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             0, data->reg_num & 0x1f, &val);
                if (!ret)
                        data->val_out = val;
                break;
        }
        case SIOCSMIIREG:{
                struct cphy *phy = &pi->phy;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!phy->mdio_write)
                        return -EOPNOTSUPP;
                if (is_10G(adapter)) {
                        mmd = data->phy_id >> 8;
                        if (!mmd)
                                mmd = MDIO_DEV_PCS;
                        else if (mmd > MDIO_DEV_VEND2)
                                return -EINVAL;

                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              0, data->reg_num & 0x1f,
                                              data->val_in);
                break;
        }
        case SIOCCHIOCTL:
                return cxgb_extension_ioctl(dev, req->ifr_data);
        default:
                return -EOPNOTSUPP;
        }
        return ret;
}

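/*
 * Change a port's MTU.  The 81-byte minimum leaves room for a TCP
 * header carrying a SACK option.  On rev 0 adapters the offload MTU
 * table depends on the port MTUs and must be reloaded.
 */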
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        if (new_mtu < 81)       /* accommodate SACK */
                return -EINVAL;
        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
        if (ret)
                return ret;
        dev->mtu = new_mtu;
        init_port_mtus(adapter);
        if (adapter->params.rev == 0 && offload_running(adapter))
                t3_load_mtus(adapter, adapter->params.mtus,
                             adapter->params.a_wnd, adapter->params.b_wnd,
                             adapter->port[0]->mtu);
        return 0;
}

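/*
 * Set a port's MAC address and, if offload is running, refresh the
 * corresponding source MAC table (SMT) entry.
 */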
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
        if (offload_running(adapter))
                write_smt_entry(adapter, pi->port_id);
        return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

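/*
 * Enable or disable hardware VLAN acceleration for a port.  Rev 0
 * adapters have a single global control, so acceleration stays on as
 * long as any port has a VLAN group registered.
 */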
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        pi->vlan_grp = grp;
        if (adapter->params.rev > 0)
                t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
        else {
                /* single control for all ports */
                unsigned int i, have_vlans = 0;

                for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int qidx;

        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
                struct sge_qset *qs = &adapter->sge.qs[qidx];
                void *source;

                if (adapter->flags & USING_MSIX)
                        source = qs;
                else
                        source = adapter;

                t3_intr_handler(adapter, qs->rspq.polling)(0, source);
        }
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);

                if (netif_running(dev)) {
                        spin_lock(&adapter->stats_lock);
                        t3_mac_update_stats(&p->mac);
                        spin_unlock(&adapter->stats_lock);
                }
        }
}

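/* Poll link state on ports whose PHYs cannot generate interrupts. */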
static void check_link_status(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);

                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
                        t3_link_changed(adapter, i);
        }
}

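/*
 * T3B2 MAC watchdog: run the MAC watchdog task on each active port and
 * either toggle a stuck MAC (status 1) or fully reset and reprogram it
 * (status 2), keeping counts of both kinds of recovery.
 */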
static void check_t3b2_mac(struct adapter *adapter)
{
        int i;

        if (!rtnl_trylock())    /* synchronize with ifdown */
                return;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);
                int status;

                if (!netif_running(dev))
                        continue;

                status = 0;
                if (netif_carrier_ok(dev))
                        status = t3b2_mac_watchdog_task(&p->mac);
                if (status == 1)
                        p->mac.stats.num_toggled++;
                else if (status == 2) {
                        struct cmac *mac = &p->mac;

                        t3_mac_set_mtu(mac, dev->mtu);
                        t3_mac_set_address(mac, 0, dev->dev_addr);
                        cxgb_set_rxmode(dev);
                        t3_link_start(&p->phy, mac, &p->link_config);
                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
                        t3_port_intr_enable(adapter, p->port_id);
                        p->mac.stats.num_resets++;
                }
        }
        rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               adap_check_task.work);
        const struct adapter_params *p = &adapter->params;

        adapter->check_task_cnt++;

        /* Check link status for PHYs without interrupts */
        if (p->linkpoll_period)
                check_link_status(adapter);

        /* Accumulate MAC stats if needed */
        if (!p->linkpoll_period ||
            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
            p->stats_update_period) {
                mac_stats_update(adapter);
                adapter->check_task_cnt = 0;
        }

        if (p->rev == T3_REV_B2)
                check_t3b2_mac(adapter);

        /* Schedule the next check update if any port is active. */
        spin_lock_irq(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_chk_task(adapter);
        spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               ext_intr_handler_task);

        t3_phy_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->work_lock);
        if (adapter->slow_intr_mask) {
                adapter->slow_intr_mask |= F_T3DBG;
                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
                t3_write_reg(adapter, A_PL_INT_ENABLE0,
                             adapter->slow_intr_mask);
        }
        spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as they may be slow
         * and we use a mutex to protect MDIO registers.  We disable PHY
         * interrupts in the meantime and let the task reenable them when
         * it's done.
         */
        spin_lock(&adapter->work_lock);
        if (adapter->slow_intr_mask) {
                adapter->slow_intr_mask &= ~F_T3DBG;
                t3_write_reg(adapter, A_PL_INT_ENABLE0,
                             adapter->slow_intr_mask);
                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
        }
        spin_unlock(&adapter->work_lock);
}

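/*
 * Take the adapter down after a fatal error: close all ports, shut
 * down offload, stop the SGE timers and optionally reset the chip.
 */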
static int t3_adapter_error(struct adapter *adapter, int reset)
{
        int i, ret = 0;

        /* Stop all ports */
        for_each_port(adapter, i) {
                struct net_device *netdev = adapter->port[i];

                if (netif_running(netdev))
                        cxgb_close(netdev);
        }

        if (is_offload(adapter) &&
            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                offload_close(&adapter->tdev);

        /* Stop SGE timers */
        t3_stop_sge_timers(adapter);

        adapter->flags &= ~FULL_INIT_DONE;

        if (reset)
                ret = t3_reset_adapter(adapter);

        pci_disable_device(adapter->pdev);

        return ret;
}

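/*
 * Re-enable the PCI function after a reset, restore its state and
 * prepare the chip for a replay of the initialization sequence.
 */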
static int t3_reenable_adapter(struct adapter *adapter)
{
        if (pci_enable_device(adapter->pdev)) {
                dev_err(&adapter->pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto err;
        }
        pci_set_master(adapter->pdev);
        pci_restore_state(adapter->pdev);

        /* Free sge resources */
        t3_free_sge_resources(adapter);

        if (t3_replay_prep_adapter(adapter))
                goto err;

        return 0;
err:
        return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
        int i;

        /* Restart the ports */
        for_each_port(adapter, i) {
                struct net_device *netdev = adapter->port[i];

                if (netif_running(netdev)) {
                        if (cxgb_open(netdev)) {
                                dev_err(&adapter->pdev->dev,
                                        "can't bring device back up"
                                        " after reset\n");
                                continue;
                        }
                }
        }
}

/*
 * Process a fatal error: bring the ports down, reset the chip, then
 * bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               fatal_error_handler_task);
        int err = 0;

        rtnl_lock();
        err = t3_adapter_error(adapter, 1);
        if (!err)
                err = t3_reenable_adapter(adapter);
        if (!err)
                t3_resume_ports(adapter);

        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
        rtnl_unlock();
}

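/*
 * Fatal error entry point: quiesce the SGE and both MACs, disable
 * interrupts and defer the actual recovery to fatal_error_task.
 */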
void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop(adapter);
                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

                spin_lock(&adapter->work_lock);
                t3_intr_disable(adapter);
                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
                spin_unlock(&adapter->work_lock);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        t3_adapter_error(adapter, 0);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (!t3_reenable_adapter(adapter))
                return PCI_ERS_RESULT_RECOVERED;

        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
        .error_detected = t3_io_error_detected,
        .slot_reset = t3_io_slot_reset,
        .resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
        int i, j = 0;
        int num_cpus = num_online_cpus();
        int hwports = adap->params.nports;
        int nqsets = SGE_QSETS;

        if (adap->params.rev > 0 && (adap->flags & USING_MSIX)) {
                if (hwports == 2 &&
                    (hwports * nqsets > SGE_QSETS ||
                     num_cpus >= nqsets / hwports))
                        nqsets /= hwports;
                if (nqsets > num_cpus)
                        nqsets = num_cpus;
                if (nqsets < 1 || hwports == 4)
                        nqsets = 1;
        } else
                nqsets = 1;

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = j;
                pi->nqsets = nqsets;
                j = pi->first_qset + nqsets;

                dev_info(&adap->pdev->dev,
                         "Port %d using %d queue sets.\n", i, nqsets);
        }
}

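/*
 * Try to allocate one MSI-X vector per queue set plus one for the slow
 * path.  On failure the caller falls back to MSI or legacy interrupts.
 */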
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int i, err;

        for (i = 0; i < ARRAY_SIZE(entries); ++i)
                entries[i].entry = i;

        err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
        if (!err) {
                for (i = 0; i < ARRAY_SIZE(entries); ++i)
                        adap->msix_info[i].vec = entries[i].vector;
        } else if (err > 0)
                dev_info(&adap->pdev->dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
        return err;
}

static void __devinit print_port_info(struct adapter *adap,
                                      const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
                       dev->name, ai->desc, pi->phy.desc,
                       is_offload(adap) ? "R" : "", adap->params.rev, buf,
                       (adap->flags & USING_MSIX) ? " MSI-X" :
                       (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        printk(KERN_INFO
                               "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                               adap->name, t3_mc7_size(&adap->cm) >> 20,
                               t3_mc7_size(&adap->pmtx) >> 20,
                               t3_mc7_size(&adap->pmrx) >> 20,
                               adap->params.vpd.sn);
        }
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t3_eth_xmit,
        .ndo_get_stats          = cxgb_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = cxgb_set_rxmode,
        .ndo_do_ioctl           = cxgb_ioctl,
        .ndo_change_mtu         = cxgb_change_mtu,
        .ndo_set_mac_address    = cxgb_set_mac_addr,
        .ndo_vlan_rx_register   = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb_netpoll,
#endif
};

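/*
 * PCI probe routine: map the register BAR, allocate the adapter and one
 * net device per port, then register whichever ports come up cleanly.
 */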
2856 static int __devinit init_one(struct pci_dev *pdev,
2857                               const struct pci_device_id *ent)
2858 {
2859         static int version_printed;
2860
2861         int i, err, pci_using_dac = 0;
2862         unsigned long mmio_start, mmio_len;
2863         const struct adapter_info *ai;
2864         struct adapter *adapter = NULL;
2865         struct port_info *pi;
2866
2867         if (!version_printed) {
2868                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2869                 ++version_printed;
2870         }
2871
2872         if (!cxgb3_wq) {
2873                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2874                 if (!cxgb3_wq) {
2875                         printk(KERN_ERR DRV_NAME
2876                                ": cannot initialize work queue\n");
2877                         return -ENOMEM;
2878                 }
2879         }
2880
2881         err = pci_request_regions(pdev, DRV_NAME);
2882         if (err) {
2883                 /* Just info, some other driver may have claimed the device. */
2884                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2885                 return err;
2886         }
2887
2888         err = pci_enable_device(pdev);
2889         if (err) {
2890                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2891                 goto out_release_regions;
2892         }
2893
2894         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2895                 pci_using_dac = 1;
2896                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2897                 if (err) {
2898                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2899                                "coherent allocations\n");
2900                         goto out_disable_device;
2901                 }
2902         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2903                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2904                 goto out_disable_device;
2905         }
2906
2907         pci_set_master(pdev);
2908         pci_save_state(pdev);
2909
2910         mmio_start = pci_resource_start(pdev, 0);
2911         mmio_len = pci_resource_len(pdev, 0);
2912         ai = t3_get_adapter_info(ent->driver_data);
2913
2914         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2915         if (!adapter) {
2916                 err = -ENOMEM;
2917                 goto out_disable_device;
2918         }
2919
2920         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2921         if (!adapter->regs) {
2922                 dev_err(&pdev->dev, "cannot map device registers\n");
2923                 err = -ENOMEM;
2924                 goto out_free_adapter;
2925         }
2926
2927         adapter->pdev = pdev;
2928         adapter->name = pci_name(pdev);
2929         adapter->msg_enable = dflt_msg_enable;
2930         adapter->mmio_len = mmio_len;
2931
2932         mutex_init(&adapter->mdio_lock);
2933         spin_lock_init(&adapter->work_lock);
2934         spin_lock_init(&adapter->stats_lock);
2935
2936         INIT_LIST_HEAD(&adapter->adapter_list);
2937         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2938         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2939         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

        for (i = 0; i < ai->nports; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_offload = T3_RX_CSUM | T3_LRO;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                netdev->features |= NETIF_F_LRO;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->netdev_ops = &cxgb_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }
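        /*
         * Each port gets its own net_device with SGE_QSETS transmit
         * queues; the devices stay carrier-off and unregistered until
         * t3_prep_adapter() below has initialized the hardware.
         */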

        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However, we
         * must register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err) {
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                } else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        /* Driver is ready; reflect it on the LEDs */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;
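        /*
         * Interrupt fallback ladder: try MSI-X first, then MSI, else stay
         * on legacy INTx.  The "msi" module parameter (defined earlier in
         * this file) gates each rung, e.g. (illustrative values):
         *
         *   modprobe cxgb3 msi=2    # MSI-X, falling back to MSI, then INTx
         *   modprobe cxgb3 msi=1    # MSI, falling back to INTx
         *   modprobe cxgb3 msi=0    # force legacy INTx
         */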

        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs group\n");

        print_port_info(adapter, ai);
        return 0;

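        /*
         * Error unwind: each label below releases what was acquired up to
         * the corresponding point above, in reverse order of acquisition.
         */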
out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

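/*
 * Tear-down mirrors the probe path in reverse: quiesce the SGE, drop the
 * sysfs group, detach any offload state, unregister the net devices,
 * release SGE and interrupt resources, free the per-port devices and the
 * adapter, and finally hand the PCI device back to the core.
 */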
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
        .err_handler = &t3_err_handler,
};
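/*
 * The err_handler hook ties the driver into the PCI error-recovery
 * framework; t3_err_handler is expected to be defined earlier in this
 * file with the error_detected/slot_reset/resume callbacks.
 */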

static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);