/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "7.3.20-k2" DRIVERNAPI
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
        INTEL_E1000_ETHERNET_DEVICE(0x1009),
        INTEL_E1000_ETHERNET_DEVICE(0x100C),
        INTEL_E1000_ETHERNET_DEVICE(0x100D),
        INTEL_E1000_ETHERNET_DEVICE(0x100E),
        INTEL_E1000_ETHERNET_DEVICE(0x100F),
        INTEL_E1000_ETHERNET_DEVICE(0x1010),
        INTEL_E1000_ETHERNET_DEVICE(0x1011),
        INTEL_E1000_ETHERNET_DEVICE(0x1012),
        INTEL_E1000_ETHERNET_DEVICE(0x1013),
        INTEL_E1000_ETHERNET_DEVICE(0x1014),
        INTEL_E1000_ETHERNET_DEVICE(0x1015),
        INTEL_E1000_ETHERNET_DEVICE(0x1016),
        INTEL_E1000_ETHERNET_DEVICE(0x1017),
        INTEL_E1000_ETHERNET_DEVICE(0x1018),
        INTEL_E1000_ETHERNET_DEVICE(0x1019),
        INTEL_E1000_ETHERNET_DEVICE(0x101A),
        INTEL_E1000_ETHERNET_DEVICE(0x101D),
        INTEL_E1000_ETHERNET_DEVICE(0x101E),
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
        INTEL_E1000_ETHERNET_DEVICE(0x1075),
        INTEL_E1000_ETHERNET_DEVICE(0x1076),
        INTEL_E1000_ETHERNET_DEVICE(0x1077),
        INTEL_E1000_ETHERNET_DEVICE(0x1078),
        INTEL_E1000_ETHERNET_DEVICE(0x1079),
        INTEL_E1000_ETHERNET_DEVICE(0x107A),
        INTEL_E1000_ETHERNET_DEVICE(0x107B),
        INTEL_E1000_ETHERNET_DEVICE(0x107C),
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static irqreturn_t e1000_intr_msi(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                               struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring,
                               int *work_done, int work_to_do);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  struct e1000_rx_ring *rx_ring,
                                  int *work_done, int work_to_do);
#else
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                      struct e1000_rx_ring *rx_ring,
                                      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");

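/* Note: module_param(..., 0644) also exposes copybreak read-write at
 * /sys/module/e1000/parameters/copybreak, so the value can be set at
 * load time (e.g. "modprobe e1000 copybreak=0" to disable copying
 * entirely) or changed at run time.
 */
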
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
        .error_detected = e1000_io_error_detected,
        .slot_reset = e1000_io_slot_reset,
        .resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = e1000_suspend,
        .resume   = e1000_resume,
#endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               e1000_driver_string, e1000_driver_version);

        printk(KERN_INFO "%s\n", e1000_copyright);

        ret = pci_register_driver(&e1000_driver);
        if (copybreak != COPYBREAK_DEFAULT) {
                if (copybreak == 0)
                        printk(KERN_INFO "e1000: copybreak disabled\n");
                else
                        printk(KERN_INFO "e1000: copybreak enabled for "
                               "packets <= %u bytes\n", copybreak);
        }
        return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
        pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

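/**
 * e1000_request_irq - register the interrupt handler
 * @adapter: board private structure
 *
 * MSI is only attempted on 82571 and newer MACs; on older parts, or if
 * pci_enable_msi() fails, the driver falls back to the legacy shared
 * interrupt handler.
 **/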
static int e1000_request_irq(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        irq_handler_t handler = e1000_intr;
        int irq_flags = IRQF_SHARED;
        int err;

        if (hw->mac_type >= e1000_82571) {
                adapter->have_msi = !pci_enable_msi(adapter->pdev);
                if (adapter->have_msi) {
                        handler = e1000_intr_msi;
                        irq_flags = 0;
                }
        }

        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                DPRINTK(PROBE, ERR,
                        "Unable to allocate interrupt Error: %d\n", err);
        }

        return err;
}

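/**
 * e1000_free_irq - release the interrupt line
 * @adapter: board private structure
 *
 * Also disables MSI if it was enabled by e1000_request_irq.
 **/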
static void e1000_free_irq(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* writing all 1s to IMC (Interrupt Mask Clear) masks every cause */
        ew32(IMC, ~0);
        E1000_WRITE_FLUSH();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* IMS (Interrupt Mask Set) unmasks the default set of causes */
        ew32(IMS, IMS_ENABLE_MASK);
        E1000_WRITE_FLUSH();
}

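/**
 * e1000_update_mng_vlan - track the VLAN id used by the management firmware
 * @adapter: board private structure
 *
 * Keeps the VLAN id from the firmware's DHCP cookie registered with the
 * VLAN group, and releases the previously tracked id once it is neither
 * current nor in use by the stack.
 **/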
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u16 vid = hw->mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;
        if (adapter->vlgrp) {
                if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (hw->mng_cookie.status &
                            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

                        if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
                            (vid != old_vid) &&
                            !vlan_group_get_device(adapter->vlgrp, old_vid))
                                e1000_vlan_rx_kill_vid(netdev, old_vid);
                } else
                        adapter->mng_vlan_id = vid;
        }
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/

static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
        u32 ctrl_ext;
        u32 swsm;
        struct e1000_hw *hw = &adapter->hw;

        /* Let firmware take over control of h/w */
        switch (hw->mac_type) {
        case e1000_82573:
                swsm = er32(SWSM);
                ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
                break;
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
        case e1000_ich8lan:
                ctrl_ext = er32(CTRL_EXT);
                ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
                break;
        default:
                break;
        }
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is open.
 *
 **/

static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
        u32 ctrl_ext;
        u32 swsm;
        struct e1000_hw *hw = &adapter->hw;

        /* Let firmware know the driver has taken over */
        switch (hw->mac_type) {
        case e1000_82573:
                swsm = er32(SWSM);
                ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
                break;
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
        case e1000_ich8lan:
                ctrl_ext = er32(CTRL_EXT);
                ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
                break;
        default:
                break;
        }
}

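/**
 * e1000_init_manageability - open management traffic paths to the host
 * @adapter: board private structure
 *
 * With management pass-through enabled, hardware ARP interception is
 * turned off and, on MACs that have a MANC2H register, packets on ports
 * 623 and 664 (the ASF/RMCP management ports) are also routed to the
 * host.
 **/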
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc = er32(MANC);

                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);

                /* enable receiving management packets to the host */
                /* this will probably generate destination unreachable
                 * messages from the host OS, but the packets will be
                 * handled on SMBUS */
                if (hw->has_manc2h) {
                        u32 manc2h = er32(MANC2H);

                        manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
                        manc2h |= E1000_MNG2HOST_PORT_623;
                        manc2h |= E1000_MNG2HOST_PORT_664;
                        ew32(MANC2H, manc2h);
                }

                ew32(MANC, manc);
        }
}

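/**
 * e1000_release_manageability - hand management filtering back to firmware
 * @adapter: board private structure
 *
 * Reverses e1000_init_manageability; MANC2H can be left alone because
 * the enable bit in MANC gates it.
 **/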
static void e1000_release_manageability(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc = er32(MANC);

                /* re-enable hardware interception of ARP */
                manc |= E1000_MANC_ARP_EN;

                if (hw->has_manc2h)
                        manc &= ~E1000_MANC_EN_MNG2HOST;

                /* don't explicitly have to mess with MANC2H since
                 * MANC has an enable bit that gates MANC2H */

                ew32(MANC, manc);
        }
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        e1000_set_rx_mode(netdev);

        e1000_restore_vlan(adapter);
        e1000_init_manageability(adapter);

        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
        /* call E1000_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
                adapter->alloc_rx_buf(adapter, ring,
                                      E1000_DESC_UNUSED(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}

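/**
 * e1000_up - bring the interface up after a reset
 * @adapter: board private structure
 *
 * Assumes the hardware has just been reset: reprograms it, re-enables
 * NAPI and interrupts, then fires a link-change interrupt so the
 * watchdog starts running.
 **/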
int e1000_up(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */
        e1000_configure(adapter);

        clear_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
        napi_enable(&adapter->napi);
#endif
        e1000_irq_enable(adapter);

        /* fire a link change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);
        return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 mii_reg = 0;

        /* Just clear the power down bit to wake the phy back up */
        if (hw->media_type == e1000_media_type_copper) {
                /* according to the manual, the phy will retain its
                 * settings across a power-down/up cycle */
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg &= ~MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
        }
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* Power down the PHY so no link is implied when interface is down.
         * The PHY cannot be powered down if any of the following is true:
         * (a) WoL is enabled
         * (b) AMT is active
         * (c) SoL/IDER session is active */
        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
            hw->media_type == e1000_media_type_copper) {
                u16 mii_reg = 0;

                switch (hw->mac_type) {
                case e1000_82540:
                case e1000_82545:
                case e1000_82545_rev_3:
                case e1000_82546:
                case e1000_82546_rev_3:
                case e1000_82541:
                case e1000_82541_rev_2:
                case e1000_82547:
                case e1000_82547_rev_2:
                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
                                goto out;
                        break;
                case e1000_82571:
                case e1000_82572:
                case e1000_82573:
                case e1000_80003es2lan:
                case e1000_ich8lan:
                        if (e1000_check_mng_mode(hw) ||
                            e1000_check_phy_reset_block(hw))
                                goto out;
                        break;
                default:
                        goto out;
                }
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
                mdelay(1);
        }
out:
        return;
}

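/**
 * e1000_down - quiesce the interface
 * @adapter: board private structure
 *
 * Counterpart to e1000_up: stops NAPI, interrupts, and the driver's
 * timers, marks the link down, then resets the hardware before cleaning
 * the rings so no DMA is left in flight.
 **/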
void e1000_down(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
        napi_disable(&adapter->napi);
#endif
        e1000_irq_disable(adapter);

        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        e1000_reset(adapter);
        e1000_clean_all_tx_rings(adapter);
        e1000_clean_all_rx_rings(adapter);
}

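/**
 * e1000_reinit_locked - restart the interface
 * @adapter: board private structure
 *
 * Serializes resets by spinning on the __E1000_RESETTING bit before
 * running a full down/up cycle.
 **/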
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
        e1000_down(adapter);
        e1000_up(adapter);
        clear_bit(__E1000_RESETTING, &adapter->flags);
}

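/**
 * e1000_reset - bring the hardware to a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer between Rx and Tx for the current MTU,
 * programs the flow control watermarks, then resets and reinitializes
 * the MAC.
 **/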
void e1000_reset(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
        bool legacy_pba_adjust = false;

        /* Repartition PBA for greater than 9k MTU.
         * To take effect, CTRL.RST is required.
         */

        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
        case e1000_82544:
        case e1000_82540:
        case e1000_82541:
        case e1000_82541_rev_2:
                legacy_pba_adjust = true;
                pba = E1000_PBA_48K;
                break;
        case e1000_82545:
        case e1000_82545_rev_3:
        case e1000_82546:
        case e1000_82546_rev_3:
                pba = E1000_PBA_48K;
                break;
        case e1000_82547:
        case e1000_82547_rev_2:
                legacy_pba_adjust = true;
                pba = E1000_PBA_30K;
                break;
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
                pba = E1000_PBA_38K;
                break;
        case e1000_82573:
                pba = E1000_PBA_20K;
                break;
        case e1000_ich8lan:
                pba = E1000_PBA_8K;
                /* fall through */
        case e1000_undefined:
        case e1000_num_macs:
                break;
        }

        if (legacy_pba_adjust) {
                if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
                        pba -= 8; /* allocate more FIFO for Tx */

                if (hw->mac_type == e1000_82547) {
                        adapter->tx_fifo_head = 0;
                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
                        adapter->tx_fifo_size =
                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
                        atomic_set(&adapter->tx_fifo_stall, 0);
                }
        } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
                /* adjust PBA for jumbo frames */
                ew32(PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = er32(PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* don't include ethernet FCS because hardware appends/strips */
                min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
                               VLAN_TAG_SIZE;
                min_tx_space = min_rx_space;
                min_tx_space *= 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* PCI/PCIx hardware has PBA alignment constraints */
                        switch (hw->mac_type) {
                        case e1000_82545 ... e1000_82546_rev_3:
                                pba &= ~(E1000_PBA_8K - 1);
                                break;
                        default:
                                break;
                        }

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment or use Early Receive if available */
                        if (pba < min_rx_space) {
                                switch (hw->mac_type) {
                                case e1000_82573:
                                        /* ERT enabled in e1000_configure_rx */
                                        break;
                                default:
                                        pba = min_rx_space;
                                        break;
                                }
                        }
                }
        }

        ew32(PBA, pba);

        /* flow control settings */
        /* Set the FC high water mark to 90% of the FIFO size.
         * Required to clear last 3 LSB */
        fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; /* pba is in KB */
        /* We can't use 90% on small FIFOs because the remainder
         * would be less than 1 full frame.  In this case, we size
         * it to allow at least a full frame above the high water
         * mark. */
        if (pba < E1000_PBA_16K)
                fc_high_water_mark = (pba * 1024) - 1600;

        hw->fc_high_water = fc_high_water_mark;
        hw->fc_low_water = fc_high_water_mark - 8;
        if (hw->mac_type == e1000_80003es2lan)
                hw->fc_pause_time = 0xFFFF;
        else
                hw->fc_pause_time = E1000_FC_PAUSE_TIME;
        hw->fc_send_xon = 1;
        hw->fc = hw->original_fc;

        /* Allow time for pending master requests to run */
        e1000_reset_hw(hw);
        if (hw->mac_type >= e1000_82544)
                ew32(WUC, 0);

        if (e1000_init_hw(hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");
        e1000_update_mng_vlan(adapter);

        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
        if (hw->mac_type >= e1000_82544 &&
            hw->mac_type <= e1000_82547_rev_2 &&
            hw->autoneg == 1 &&
            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
                u32 ctrl = er32(CTRL);
                /* clear phy power management bit if we are in gig only mode,
                 * which if enabled will attempt negotiation to 100Mb, which
                 * can cause a loss of link at power off or driver unload */
                ctrl &= ~E1000_CTRL_SWDPIN3;
                ew32(CTRL, ctrl);
        }

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

        e1000_reset_adaptive(hw);
        e1000_phy_get_info(hw, &adapter->phy_info);

        if (!adapter->smart_power_down &&
            (hw->mac_type == e1000_82571 ||
             hw->mac_type == e1000_82572)) {
                u16 phy_data = 0;
                /* speed up time to link by disabling smart power down, ignore
                 * the return value of this function because there is nothing
                 * different we would do if it failed */
                e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                   &phy_data);
                phy_data &= ~IGP02E1000_PM_SPD;
                e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                    phy_data);
        }

        e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the eeprom for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ethtool_eeprom eeprom;
        const struct ethtool_ops *ops = netdev->ethtool_ops;
        u8 *data;
        int i;
        u16 csum_old, csum_new = 0;

        eeprom.len = ops->get_eeprom_len(netdev);
        eeprom.offset = 0;

        data = kmalloc(eeprom.len, GFP_KERNEL);
        if (!data) {
                printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
                       " data\n");
                return;
        }

        ops->get_eeprom(netdev, &eeprom, data);

        /* the stored checksum word should make all 16-bit words up to and
         * including EEPROM_CHECKSUM_REG sum to EEPROM_SUM */
        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
                csum_new += data[i] + (data[i + 1] << 8);
        csum_new = EEPROM_SUM - csum_new;

        printk(KERN_ERR "/*********************/\n");
        printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
        printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);

        printk(KERN_ERR "Offset    Values\n");
        printk(KERN_ERR "========  ======\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

        printk(KERN_ERR "Include this output when contacting your support "
               "provider.\n");
        printk(KERN_ERR "This is not a software error! Something bad "
               "happened to your hardware or\n");
        printk(KERN_ERR "EEPROM image. Ignoring this "
               "problem could result in further problems,\n");
        printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
        printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
               "which is invalid\n");
        printk(KERN_ERR "and requires you to set the proper MAC "
               "address manually before continuing\n");
        printk(KERN_ERR "to enable this network device.\n");
        printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
               "to your hardware vendor\n");
        printk(KERN_ERR "or Intel Customer Support: linux-nics@intel.com\n");
        printk(KERN_ERR "/*********************/\n");

        kfree(data);
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit e1000_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        struct e1000_hw *hw;

        static int cards_found = 0;
        static int global_quad_port_a = 0; /* global ksp3 port a indication */
        int i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
        DECLARE_MAC_BUF(mac);

        if ((err = pci_enable_device(pdev)))
                return err;

        if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
            !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
                if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
        }

        if ((err = pci_request_regions(pdev, e1000_driver_name)))
                goto err_pci_reg;

        pci_set_master(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->msg_enable = (1 << debug) - 1;

        hw = &adapter->hw;
        hw->back = adapter;

        err = -EIO;
        hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
                              pci_resource_len(pdev, BAR_0));
        if (!hw->hw_addr)
                goto err_ioremap;

        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        hw->io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

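        /* this tree predates struct net_device_ops, so the entry points
         * are assigned directly on the net_device */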
        netdev->open = &e1000_open;
        netdev->stop = &e1000_close;
        netdev->hard_start_xmit = &e1000_xmit_frame;
        netdev->get_stats = &e1000_get_stats;
        netdev->set_rx_mode = &e1000_set_rx_mode;
        netdev->set_mac_address = &e1000_set_mac;
        netdev->change_mtu = &e1000_change_mtu;
        netdev->do_ioctl = &e1000_ioctl;
        e1000_set_ethtool_ops(netdev);
        netdev->tx_timeout = &e1000_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
#endif
        netdev->vlan_rx_register = e1000_vlan_rx_register;
        netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = e1000_netpoll;
#endif
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;

        /* setup the private structure */

        if ((err = e1000_sw_init(adapter)))
                goto err_sw_init;

        err = -EIO;
        /* Flash BAR mapping must happen after e1000_sw_init
         * because it depends on mac_type */
        if ((hw->mac_type == e1000_ich8lan) &&
            (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                hw->flash_address =
                        ioremap(pci_resource_start(pdev, 1),
                                pci_resource_len(pdev, 1));
                if (!hw->flash_address)
                        goto err_flashmap;
        }

        if (e1000_check_phy_reset_block(hw))
                DPRINTK(PROBE, INFO,
                        "PHY reset is blocked due to SOL/IDER session.\n");

        if (hw->mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
                if (hw->mac_type == e1000_ich8lan)
                        netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
        }

        if ((hw->mac_type >= e1000_82544) &&
            (hw->mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;

        if (hw->mac_type > e1000_82547_rev_2)
                netdev->features |= NETIF_F_TSO6;
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->features |= NETIF_F_LLTX;

        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

        /* initialize eeprom parameters */
        if (e1000_init_eeprom_params(hw)) {
                E1000_ERR("EEPROM initialization failed\n");
                goto err_eeprom;
        }

        /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */

        e1000_reset_hw(hw);

        /* make sure the EEPROM is good */
        if (e1000_validate_eeprom_checksum(hw) < 0) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
                e1000_dump_eeprom(adapter);
                /*
                 * set MAC address to all zeroes to invalidate and temporarily
                 * disable this device for the user. This blocks regular
                 * traffic while still permitting ethtool ioctls from reaching
                 * the hardware as well as allowing the user to run the
                 * interface after manually setting a hw addr using
                 * `ip link set address`
                 */
                memset(hw->mac_addr, 0, netdev->addr_len);
        } else {
                /* copy the MAC address out of the EEPROM */
                if (e1000_read_mac_addr(hw))
                        DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
        }
        /* don't block initialization here due to a bad MAC address */
        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr))
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");

        e1000_get_bus_info(hw);

        init_timer(&adapter->tx_fifo_stall_timer);
        adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
        adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &e1000_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = &e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->reset_task, e1000_reset_task);

        e1000_check_options(adapter);

        /* Initial Wake on LAN setting
         * If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */

        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
                break;
        case e1000_82544:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
        case e1000_ich8lan:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
        case e1000_82571:
        case e1000_80003es2lan:
                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
                        e1000_read_eeprom(hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
                }
                /* Fall Through */
        default:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases
         * where the eeprom may be wrong or the board simply won't support
         * wake on lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82546GB_PCIE:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
        case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (er32(STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
        case E1000_DEV_ID_82571EB_QUAD_COPPER:
        case E1000_DEV_ID_82571EB_QUAD_FIBER:
        case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
        case E1000_DEV_ID_82571PT_QUAD_COPPER:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
                        adapter->quad_port_a = 1;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;

        /* print bus type/speed/width info */
        DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
                 (hw->bus_type == e1000_bus_type_pci_express ? " Express" : "")),
                ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
                 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
                 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
                 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
                 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
                ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
                 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
                 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
                 "32-bit"));

        printk("%s\n", print_mac(mac, netdev->dev_addr));

        if (hw->bus_type == e1000_bus_type_pci_express) {
                DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
                        "longer be supported by this driver in the future.\n",
                        pdev->vendor, pdev->device);
                DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
                        "driver instead.\n");
        }

        /* reset the hardware with the new settings */
        e1000_reset(adapter);

        /* If the controller is 82573 and f/w is AMT, do not set
         * DRV_LOAD until the interface is up.  For all other cases,
         * let the f/w know that the h/w is now under the control
         * of the driver. */
        if (hw->mac_type != e1000_82573 ||
            !e1000_check_mng_mode(hw))
                e1000_get_hw_control(adapter);

        /* tell the stack to leave us alone until e1000_open() is called */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        strcpy(netdev->name, "eth%d");
        if ((err = register_netdev(netdev)))
                goto err_register;

        DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

        cards_found++;
        return 0;

err_register:
        e1000_release_hw_control(adapter);
err_eeprom:
        if (!e1000_check_phy_reset_block(hw))
                e1000_phy_hw_reset(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);
err_flashmap:
#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++)
                dev_put(&adapter->polling_netdev[i]);
#endif

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
        kfree(adapter->polling_netdev);
#endif
err_sw_init:
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
#ifdef CONFIG_E1000_NAPI
        int i;
#endif

        cancel_work_sync(&adapter->reset_task);

        e1000_release_manageability(adapter);

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        e1000_release_hw_control(adapter);

#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++)
                dev_put(&adapter->polling_netdev[i]);
#endif

        unregister_netdev(netdev);

        if (!e1000_check_phy_reset_block(hw))
                e1000_phy_hw_reset(hw);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
        kfree(adapter->polling_netdev);
#endif

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_regions(pdev);

        free_netdev(netdev);

        pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
        int i;
#endif

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
        hw->revision_id = pdev->revision;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
        hw->max_frame_size = netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

        /* identify the MAC */

        if (e1000_set_mac_type(hw)) {
                DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
                return -EIO;
        }

        switch (hw->mac_type) {
        default:
                break;
        case e1000_82541:
        case e1000_82547:
        case e1000_82541_rev_2:
        case e1000_82547_rev_2:
                hw->phy_init_script = 1;
                break;
        }

        e1000_set_media_type(hw);

        hw->wait_autoneg_complete = false;
        hw->tbi_compatibility_en = true;
        hw->adaptive_ifs = true;

        /* Copper options */

        if (hw->media_type == e1000_media_type_copper) {
                hw->mdix = AUTO_ALL_MODES;
                hw->disable_polarity_correction = false;
                hw->master_slave = E1000_MASTER_SLAVE;
        }

        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = 1;

        if (e1000_alloc_queues(adapter)) {
                DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->polling_netdev[i].priv = adapter;
                dev_hold(&adapter->polling_netdev[i]);
                set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
        }
        spin_lock_init(&adapter->tx_queue_lock);
#endif

        /* Explicitly disable IRQ since the NIC can be in any state. */
        e1000_irq_disable(adapter);

        spin_lock_init(&adapter->stats_lock);

        set_bit(__E1000_DOWN, &adapter->flags);

        return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }

#ifdef CONFIG_E1000_NAPI
        adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
                                          sizeof(struct net_device),
                                          GFP_KERNEL);
        if (!adapter->polling_netdev) {
                kfree(adapter->tx_ring);
                kfree(adapter->rx_ring);
                return -ENOMEM;
        }
#endif

        return E1000_SUCCESS;
}

1407 /**
1408  * e1000_open - Called when a network interface is made active
1409  * @netdev: network interface device structure
1410  *
1411  * Returns 0 on success, negative value on failure
1412  *
1413  * The open entry point is called when a network interface is made
1414  * active by the system (IFF_UP).  At this point all resources needed
1415  * for transmit and receive operations are allocated, the interrupt
1416  * handler is registered with the OS, the watchdog timer is started,
1417  * and the stack is notified that the interface is ready.
1418  **/
1419
1420 static int e1000_open(struct net_device *netdev)
1421 {
1422         struct e1000_adapter *adapter = netdev_priv(netdev);
1423         struct e1000_hw *hw = &adapter->hw;
1424         int err;
1425
1426         /* disallow open during test */
1427         if (test_bit(__E1000_TESTING, &adapter->flags))
1428                 return -EBUSY;
1429
1430         /* allocate transmit descriptors */
1431         err = e1000_setup_all_tx_resources(adapter);
1432         if (err)
1433                 goto err_setup_tx;
1434
1435         /* allocate receive descriptors */
1436         err = e1000_setup_all_rx_resources(adapter);
1437         if (err)
1438                 goto err_setup_rx;
1439
1440         e1000_power_up_phy(adapter);
1441
1442         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1443         if ((hw->mng_cookie.status &
1444                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1445                 e1000_update_mng_vlan(adapter);
1446         }
1447
1448         /* If AMT is enabled, let the firmware know that the network
1449          * interface is now open */
1450         if (hw->mac_type == e1000_82573 &&
1451             e1000_check_mng_mode(hw))
1452                 e1000_get_hw_control(adapter);
1453
1454         /* before we allocate an interrupt, we must be ready to handle it.
1455          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1456          * as soon as we call pci_request_irq, so we have to set up our
1457          * clean_rx handler before we do so.  */
1458         e1000_configure(adapter);
1459
1460         err = e1000_request_irq(adapter);
1461         if (err)
1462                 goto err_req_irq;
1463
1464         /* From here on the code is the same as e1000_up() */
1465         clear_bit(__E1000_DOWN, &adapter->flags);
1466
1467 #ifdef CONFIG_E1000_NAPI
1468         napi_enable(&adapter->napi);
1469 #endif
1470
1471         e1000_irq_enable(adapter);
1472
1473         netif_start_queue(netdev);
1474
1475         /* fire a link status change interrupt to start the watchdog */
1476         ew32(ICS, E1000_ICS_LSC);
1477
1478         return E1000_SUCCESS;
1479
1480 err_req_irq:
1481         e1000_release_hw_control(adapter);
1482         e1000_power_down_phy(adapter);
1483         e1000_free_all_rx_resources(adapter);
1484 err_setup_rx:
1485         e1000_free_all_tx_resources(adapter);
1486 err_setup_tx:
1487         e1000_reset(adapter);
1488
1489         return err;
1490 }
1491
1492 /**
1493  * e1000_close - Disables a network interface
1494  * @netdev: network interface device structure
1495  *
1496  * Returns 0, this is not allowed to fail
1497  *
1498  * The close entry point is called when an interface is de-activated
1499  * by the OS.  The hardware is still under the drivers control, but
1500  * needs to be disabled.  A global MAC reset is issued to stop the
1501  * hardware, and all transmit and receive resources are freed.
1502  **/
1503
1504 static int e1000_close(struct net_device *netdev)
1505 {
1506         struct e1000_adapter *adapter = netdev_priv(netdev);
1507         struct e1000_hw *hw = &adapter->hw;
1508
1509         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1510         e1000_down(adapter);
1511         e1000_power_down_phy(adapter);
1512         e1000_free_irq(adapter);
1513
1514         e1000_free_all_tx_resources(adapter);
1515         e1000_free_all_rx_resources(adapter);
1516
1517         /* kill manageability vlan ID if supported, but not if a vlan with
1518          * the same ID is registered on the host OS (let 8021q kill it) */
1519         if ((hw->mng_cookie.status &
1520                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1521              !(adapter->vlgrp &&
1522                vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
1523                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1524         }
1525
1526         /* If AMT is enabled, let the firmware know that the network
1527          * interface is now closed */
1528         if (hw->mac_type == e1000_82573 &&
1529             e1000_check_mng_mode(hw))
1530                 e1000_release_hw_control(adapter);
1531
1532         return 0;
1533 }
1534
1535 /**
1536  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1537  * @adapter: address of board private structure
1538  * @start: address of beginning of memory
1539  * @len: length of memory
1540  **/
1541 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1542                                   unsigned long len)
1543 {
1544         struct e1000_hw *hw = &adapter->hw;
1545         unsigned long begin = (unsigned long) start;
1546         unsigned long end = begin + len;
1547
1548         /* First-rev 82545 and 82546 must not let any memory write
1549          * cross a 64 kB boundary, due to errata 23 */
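        /* A worked example of the check below: begin = 0x0fff0 and
         * len = 0x20 give end - 1 = 0x1000f, so begin ^ (end - 1) =
         * 0x1ffff and the shift by 16 is non-zero -- the buffer
         * crosses a 64 kB boundary and the check fails.  Two addresses
         * lie in the same 64 kB region exactly when bits 16 and up
         * agree. */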
1550         if (hw->mac_type == e1000_82545 ||
1551             hw->mac_type == e1000_82546) {
1552                 return ((begin ^ (end - 1)) >> 16) == 0;
1553         }
1554
1555         return true;
1556 }
1557
1558 /**
1559  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1560  * @adapter: board private structure
1561  * @txdr:    tx descriptor ring (for a specific queue) to setup
1562  *
1563  * Return 0 on success, negative on failure
1564  **/
1565
1566 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1567                                     struct e1000_tx_ring *txdr)
1568 {
1569         struct pci_dev *pdev = adapter->pdev;
1570         int size;
1571
1572         size = sizeof(struct e1000_buffer) * txdr->count;
1573         txdr->buffer_info = vmalloc(size);
1574         if (!txdr->buffer_info) {
1575                 DPRINTK(PROBE, ERR,
1576                 "Unable to allocate memory for the transmit descriptor ring\n");
1577                 return -ENOMEM;
1578         }
1579         memset(txdr->buffer_info, 0, size);
1580
1581         /* round up to nearest 4K */
1582
1583         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1584         txdr->size = ALIGN(txdr->size, 4096);
1585
1586         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1587         if (!txdr->desc) {
1588 setup_tx_desc_die:
1589                 vfree(txdr->buffer_info);
1590                 DPRINTK(PROBE, ERR,
1591                 "Unable to allocate memory for the transmit descriptor ring\n");
1592                 return -ENOMEM;
1593         }
1594
1595         /* Fix for errata 23, can't cross 64kB boundary */
1596         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1597                 void *olddesc = txdr->desc;
1598                 dma_addr_t olddma = txdr->dma;
1599                 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
1600                                      "at %p\n", txdr->size, txdr->desc);
1601                 /* Try again, without freeing the previous */
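                /* (keeping the old block allocated presumably prevents
                 * the allocator from handing back the same misaligned
                 * region on the retry) */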
1602                 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1603                 /* Failed allocation, critical failure */
1604                 if (!txdr->desc) {
1605                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1606                         goto setup_tx_desc_die;
1607                 }
1608
1609                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1610                         /* give up */
1611                         pci_free_consistent(pdev, txdr->size, txdr->desc,
1612                                             txdr->dma);
1613                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1614                         DPRINTK(PROBE, ERR,
1615                                 "Unable to allocate aligned memory "
1616                                 "for the transmit descriptor ring\n");
1617                         vfree(txdr->buffer_info);
1618                         return -ENOMEM;
1619                 } else {
1620                         /* Free old allocation, new allocation was successful */
1621                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1622                 }
1623         }
1624         memset(txdr->desc, 0, txdr->size);
1625
1626         txdr->next_to_use = 0;
1627         txdr->next_to_clean = 0;
1628         spin_lock_init(&txdr->tx_lock);
1629
1630         return 0;
1631 }
1632
1633 /**
1634  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1635  *                                (Descriptors) for all queues
1636  * @adapter: board private structure
1637  *
1638  * Return 0 on success, negative on failure
1639  **/
1640
1641 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1642 {
1643         int i, err = 0;
1644
1645         for (i = 0; i < adapter->num_tx_queues; i++) {
1646                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1647                 if (err) {
1648                         DPRINTK(PROBE, ERR,
1649                                 "Allocation for Tx Queue %u failed\n", i);
1650                         for (i--; i >= 0; i--)
1651                                 e1000_free_tx_resources(adapter,
1652                                                         &adapter->tx_ring[i]);
1653                         break;
1654                 }
1655         }
1656
1657         return err;
1658 }
1659
1660 /**
1661  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1662  * @adapter: board private structure
1663  *
1664  * Configure the Tx unit of the MAC after a reset.
1665  **/
1666
1667 static void e1000_configure_tx(struct e1000_adapter *adapter)
1668 {
1669         u64 tdba;
1670         struct e1000_hw *hw = &adapter->hw;
1671         u32 tdlen, tctl, tipg, tarc;
1672         u32 ipgr1, ipgr2;
1673
1674         /* Setup the HW Tx Head and Tail descriptor pointers */
1675
1676         switch (adapter->num_tx_queues) {
1677         case 1:
1678         default:
1679                 tdba = adapter->tx_ring[0].dma;
1680                 tdlen = adapter->tx_ring[0].count *
1681                         sizeof(struct e1000_tx_desc);
1682                 ew32(TDLEN, tdlen);
1683                 ew32(TDBAH, (tdba >> 32));
1684                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1685                 ew32(TDT, 0);
1686                 ew32(TDH, 0);
1687                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1688                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1689                 break;
1690         }
1691
1692         /* Set the default values for the Tx Inter Packet Gap timer */
1693         if (hw->mac_type <= e1000_82547_rev_2 &&
1694             (hw->media_type == e1000_media_type_fiber ||
1695              hw->media_type == e1000_media_type_internal_serdes))
1696                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1697         else
1698                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1699
1700         switch (hw->mac_type) {
1701         case e1000_82542_rev2_0:
1702         case e1000_82542_rev2_1:
1703                 tipg = DEFAULT_82542_TIPG_IPGT;
1704                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1705                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1706                 break;
1707         case e1000_80003es2lan:
1708                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1709                 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1710                 break;
1711         default:
1712                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1713                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1714                 break;
1715         }
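        /* TIPG packs three fields into one register: IPGT stays in
         * bits 9:0, while IPGR1 and IPGR2 are shifted into bits 19:10
         * and 29:20 below, i.e.
         *   tipg = ipgt | (ipgr1 << 10) | (ipgr2 << 20) */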
1716         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1717         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1718         ew32(TIPG, tipg);
1719
1720         /* Set the Tx Interrupt Delay register */
1721
1722         ew32(TIDV, adapter->tx_int_delay);
1723         if (hw->mac_type >= e1000_82540)
1724                 ew32(TADV, adapter->tx_abs_int_delay);
1725
1726         /* Program the Transmit Control Register */
1727
1728         tctl = er32(TCTL);
1729         tctl &= ~E1000_TCTL_CT;
1730         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1731                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1732
1733         if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1734                 tarc = er32(TARC0);
1735                 /* set the speed mode bit, we'll clear it if we're not at
1736                  * gigabit link later */
1737                 tarc |= (1 << 21);
1738                 ew32(TARC0, tarc);
1739         } else if (hw->mac_type == e1000_80003es2lan) {
1740                 tarc = er32(TARC0);
1741                 tarc |= 1;
1742                 ew32(TARC0, tarc);
1743                 tarc = er32(TARC1);
1744                 tarc |= 1;
1745                 ew32(TARC1, tarc);
1746         }
1747
1748         e1000_config_collision_dist(hw);
1749
1750         /* Setup Transmit Descriptor Settings for eop descriptor */
1751         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1752
1753         /* only set IDE if we are delaying interrupts using the timers */
1754         if (adapter->tx_int_delay)
1755                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1756
1757         if (hw->mac_type < e1000_82543)
1758                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1759         else
1760                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1761
1762         /* Cache if we're 82544 running in PCI-X because we'll
1763          * need this to apply a workaround later in the send path. */
1764         if (hw->mac_type == e1000_82544 &&
1765             hw->bus_type == e1000_bus_type_pcix)
1766                 adapter->pcix_82544 = 1;
1767
1768         ew32(TCTL, tctl);
1769
1770 }
1771
1772 /**
1773  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1774  * @adapter: board private structure
1775  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1776  *
1777  * Returns 0 on success, negative on failure
1778  **/
1779
1780 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1781                                     struct e1000_rx_ring *rxdr)
1782 {
1783         struct e1000_hw *hw = &adapter->hw;
1784         struct pci_dev *pdev = adapter->pdev;
1785         int size, desc_len;
1786
1787         size = sizeof(struct e1000_buffer) * rxdr->count;
1788         rxdr->buffer_info = vmalloc(size);
1789         if (!rxdr->buffer_info) {
1790                 DPRINTK(PROBE, ERR,
1791                 "Unable to allocate memory for the receive descriptor ring\n");
1792                 return -ENOMEM;
1793         }
1794         memset(rxdr->buffer_info, 0, size);
1795
1796         rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
1797                                 GFP_KERNEL);
1798         if (!rxdr->ps_page) {
1799                 vfree(rxdr->buffer_info);
1800                 DPRINTK(PROBE, ERR,
1801                 "Unable to allocate memory for the receive descriptor ring\n");
1802                 return -ENOMEM;
1803         }
1804
1805         rxdr->ps_page_dma = kcalloc(rxdr->count,
1806                                     sizeof(struct e1000_ps_page_dma),
1807                                     GFP_KERNEL);
1808         if (!rxdr->ps_page_dma) {
1809                 vfree(rxdr->buffer_info);
1810                 kfree(rxdr->ps_page);
1811                 DPRINTK(PROBE, ERR,
1812                 "Unable to allocate memory for the receive descriptor ring\n");
1813                 return -ENOMEM;
1814         }
1815
1816         if (hw->mac_type <= e1000_82547_rev_2)
1817                 desc_len = sizeof(struct e1000_rx_desc);
1818         else
1819                 desc_len = sizeof(union e1000_rx_desc_packet_split);
1820
1821         /* Round up to nearest 4K */
1822
1823         rxdr->size = rxdr->count * desc_len;
1824         rxdr->size = ALIGN(rxdr->size, 4096);
1825
1826         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1827
1828         if (!rxdr->desc) {
1829                 DPRINTK(PROBE, ERR,
1830                 "Unable to allocate memory for the receive descriptor ring\n");
1831 setup_rx_desc_die:
1832                 vfree(rxdr->buffer_info);
1833                 kfree(rxdr->ps_page);
1834                 kfree(rxdr->ps_page_dma);
1835                 return -ENOMEM;
1836         }
1837
1838         /* Fix for errata 23, can't cross 64kB boundary */
1839         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1840                 void *olddesc = rxdr->desc;
1841                 dma_addr_t olddma = rxdr->dma;
1842                 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1843                                      "at %p\n", rxdr->size, rxdr->desc);
1844                 /* Try again, without freeing the previous */
1845                 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1846                 /* Failed allocation, critical failure */
1847                 if (!rxdr->desc) {
1848                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1849                         DPRINTK(PROBE, ERR,
1850                                 "Unable to allocate memory "
1851                                 "for the receive descriptor ring\n");
1852                         goto setup_rx_desc_die;
1853                 }
1854
1855                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1856                         /* give up */
1857                         pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1858                                             rxdr->dma);
1859                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1860                         DPRINTK(PROBE, ERR,
1861                                 "Unable to allocate aligned memory "
1862                                 "for the receive descriptor ring\n");
1863                         goto setup_rx_desc_die;
1864                 } else {
1865                         /* Free old allocation, new allocation was successful */
1866                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1867                 }
1868         }
1869         memset(rxdr->desc, 0, rxdr->size);
1870
1871         rxdr->next_to_clean = 0;
1872         rxdr->next_to_use = 0;
1873
1874         return 0;
1875 }
1876
1877 /**
1878  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1879  *                                (Descriptors) for all queues
1880  * @adapter: board private structure
1881  *
1882  * Return 0 on success, negative on failure
1883  **/
1884
1885 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1886 {
1887         int i, err = 0;
1888
1889         for (i = 0; i < adapter->num_rx_queues; i++) {
1890                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1891                 if (err) {
1892                         DPRINTK(PROBE, ERR,
1893                                 "Allocation for Rx Queue %u failed\n", i);
1894                         for (i--; i >= 0; i--)
1895                                 e1000_free_rx_resources(adapter,
1896                                                         &adapter->rx_ring[i]);
1897                         break;
1898                 }
1899         }
1900
1901         return err;
1902 }
1903
1904 /**
1905  * e1000_setup_rctl - configure the receive control registers
1906  * @adapter: Board private structure
1907  **/
1908 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1909                         (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1910 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1911 {
1912         struct e1000_hw *hw = &adapter->hw;
1913         u32 rctl, rfctl;
1914         u32 psrctl = 0;
1915 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1916         u32 pages = 0;
1917 #endif
1918
1919         rctl = er32(RCTL);
1920
1921         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1922
1923         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1924                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1925                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1926
1927         if (hw->tbi_compatibility_on == 1)
1928                 rctl |= E1000_RCTL_SBP;
1929         else
1930                 rctl &= ~E1000_RCTL_SBP;
1931
1932         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1933                 rctl &= ~E1000_RCTL_LPE;
1934         else
1935                 rctl |= E1000_RCTL_LPE;
1936
1937         /* Setup buffer sizes */
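        /* the BSIZE field is cleared first and BSEX (buffer size
         * extension) set; encodings up to 2048 then clear BSEX again,
         * while 4096/8192/16384 reuse the same encodings with BSEX
         * set, which scales them by 16 */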
1938         rctl &= ~E1000_RCTL_SZ_4096;
1939         rctl |= E1000_RCTL_BSEX;
1940         switch (adapter->rx_buffer_len) {
1941                 case E1000_RXBUFFER_256:
1942                         rctl |= E1000_RCTL_SZ_256;
1943                         rctl &= ~E1000_RCTL_BSEX;
1944                         break;
1945                 case E1000_RXBUFFER_512:
1946                         rctl |= E1000_RCTL_SZ_512;
1947                         rctl &= ~E1000_RCTL_BSEX;
1948                         break;
1949                 case E1000_RXBUFFER_1024:
1950                         rctl |= E1000_RCTL_SZ_1024;
1951                         rctl &= ~E1000_RCTL_BSEX;
1952                         break;
1953                 case E1000_RXBUFFER_2048:
1954                 default:
1955                         rctl |= E1000_RCTL_SZ_2048;
1956                         rctl &= ~E1000_RCTL_BSEX;
1957                         break;
1958                 case E1000_RXBUFFER_4096:
1959                         rctl |= E1000_RCTL_SZ_4096;
1960                         break;
1961                 case E1000_RXBUFFER_8192:
1962                         rctl |= E1000_RCTL_SZ_8192;
1963                         break;
1964                 case E1000_RXBUFFER_16384:
1965                         rctl |= E1000_RCTL_SZ_16384;
1966                         break;
1967         }
1968
1969 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1970         /* 82571 and greater support packet-split where the protocol
1971          * header is placed in skb->data and the packet data is
1972          * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1973          * In the case of a non-split, skb->data is linearly filled,
1974          * followed by the page buffers.  Therefore, skb->data is
1975          * sized to hold the largest protocol header.
1976          */
1977         /* allocations using alloc_page take too long for regular MTU
1978          * so only enable packet split for jumbo frames */
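        /* e.g. with 4 kB pages, an MTU of 9000 needs
         * PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3 pages, which
         * still qualifies for packet split below */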
1979         pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1980         if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
1981             PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
1982                 adapter->rx_ps_pages = pages;
1983         else
1984                 adapter->rx_ps_pages = 0;
1985 #endif
1986         if (adapter->rx_ps_pages) {
1987                 /* Configure extra packet-split registers */
1988                 rfctl = er32(RFCTL);
1989                 rfctl |= E1000_RFCTL_EXTEN;
1990                 /* disable packet split support for IPv6 extension headers,
1991                  * because some malformed IPv6 headers can hang the RX */
1992                 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1993                           E1000_RFCTL_NEW_IPV6_EXT_DIS);
1994
1995                 ew32(RFCTL, rfctl);
1996
1997                 rctl |= E1000_RCTL_DTYP_PS;
1998
1999                 psrctl |= adapter->rx_ps_bsize0 >>
2000                         E1000_PSRCTL_BSIZE0_SHIFT;
2001
                /* each case deliberately falls through, so that a given
                 * page count also sizes every lower-numbered buffer */
2002                 switch (adapter->rx_ps_pages) {
2003                 case 3:
2004                         psrctl |= PAGE_SIZE <<
2005                                 E1000_PSRCTL_BSIZE3_SHIFT;
                        /* fall through */
2006                 case 2:
2007                         psrctl |= PAGE_SIZE <<
2008                                 E1000_PSRCTL_BSIZE2_SHIFT;
                        /* fall through */
2009                 case 1:
2010                         psrctl |= PAGE_SIZE >>
2011                                 E1000_PSRCTL_BSIZE1_SHIFT;
2012                         break;
2013                 }
2014
2015                 ew32(PSRCTL, psrctl);
2016         }
2017
2018         ew32(RCTL, rctl);
2019 }
2020
2021 /**
2022  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
2023  * @adapter: board private structure
2024  *
2025  * Configure the Rx unit of the MAC after a reset.
2026  **/
2027
2028 static void e1000_configure_rx(struct e1000_adapter *adapter)
2029 {
2030         u64 rdba;
2031         struct e1000_hw *hw = &adapter->hw;
2032         u32 rdlen, rctl, rxcsum, ctrl_ext;
2033
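        /* pick the rx clean/alloc handlers to match the descriptor
         * format: packet-split uses 32-byte split descriptors, the
         * legacy path 16-byte ones */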
2034         if (adapter->rx_ps_pages) {
2035                 /* this is a 32 byte descriptor */
2036                 rdlen = adapter->rx_ring[0].count *
2037                         sizeof(union e1000_rx_desc_packet_split);
2038                 adapter->clean_rx = e1000_clean_rx_irq_ps;
2039                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2040         } else {
2041                 rdlen = adapter->rx_ring[0].count *
2042                         sizeof(struct e1000_rx_desc);
2043                 adapter->clean_rx = e1000_clean_rx_irq;
2044                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2045         }
2046
2047         /* disable receives while setting up the descriptors */
2048         rctl = er32(RCTL);
2049         ew32(RCTL, rctl & ~E1000_RCTL_EN);
2050
2051         /* set the Receive Delay Timer Register */
2052         ew32(RDTR, adapter->rx_int_delay);
2053
2054         if (hw->mac_type >= e1000_82540) {
2055                 ew32(RADV, adapter->rx_abs_int_delay);
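                /* the ITR register takes the minimum inter-interrupt
                 * interval in 256 ns units, hence this conversion from
                 * the interrupts/sec value in adapter->itr */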
2056                 if (adapter->itr_setting != 0)
2057                         ew32(ITR, 1000000000 / (adapter->itr * 256));
2058         }
2059
2060         if (hw->mac_type >= e1000_82571) {
2061                 ctrl_ext = er32(CTRL_EXT);
2062                 /* Reset delay timers after every interrupt */
2063                 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2064 #ifdef CONFIG_E1000_NAPI
2065                 /* Auto-Mask interrupts upon ICR access */
2066                 ctrl_ext |= E1000_CTRL_EXT_IAME;
2067                 ew32(IAM, 0xffffffff);
2068 #endif
2069                 ew32(CTRL_EXT, ctrl_ext);
2070                 E1000_WRITE_FLUSH();
2071         }
2072
2073         /* Setup the HW Rx Head and Tail Descriptor Pointers and
2074          * the Base and Length of the Rx Descriptor Ring */
2075         switch (adapter->num_rx_queues) {
2076         case 1:
2077         default:
2078                 rdba = adapter->rx_ring[0].dma;
2079                 ew32(RDLEN, rdlen);
2080                 ew32(RDBAH, (rdba >> 32));
2081                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
2082                 ew32(RDT, 0);
2083                 ew32(RDH, 0);
2084                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
2085                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
2086                 break;
2087         }
2088
2089         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2090         if (hw->mac_type >= e1000_82543) {
2091                 rxcsum = er32(RXCSUM);
2092                 if (adapter->rx_csum) {
2093                         rxcsum |= E1000_RXCSUM_TUOFL;
2094
2095                         /* Enable 82571 IPv4 payload checksum for UDP fragments
2096                          * Must be used in conjunction with packet-split. */
2097                         if ((hw->mac_type >= e1000_82571) &&
2098                             (adapter->rx_ps_pages)) {
2099                                 rxcsum |= E1000_RXCSUM_IPPCSE;
2100                         }
2101                 } else {
2102                         rxcsum &= ~E1000_RXCSUM_TUOFL;
2103                         /* don't need to clear IPPCSE as it defaults to 0 */
2104                 }
2105                 ew32(RXCSUM, rxcsum);
2106         }
2107
2108         /* enable early receives on 82573; only takes effect with frames
2109          * over 2048 bytes total, i.e. in practice only jumbo frames */
2110 #define E1000_ERT_2048 0x100
2111         if (hw->mac_type == e1000_82573)
2112                 ew32(ERT, E1000_ERT_2048);
2113
2114         /* Enable Receives */
2115         ew32(RCTL, rctl);
2116 }
2117
2118 /**
2119  * e1000_free_tx_resources - Free Tx Resources per Queue
2120  * @adapter: board private structure
2121  * @tx_ring: Tx descriptor ring for a specific queue
2122  *
2123  * Free all transmit software resources
2124  **/
2125
2126 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
2127                                     struct e1000_tx_ring *tx_ring)
2128 {
2129         struct pci_dev *pdev = adapter->pdev;
2130
2131         e1000_clean_tx_ring(adapter, tx_ring);
2132
2133         vfree(tx_ring->buffer_info);
2134         tx_ring->buffer_info = NULL;
2135
2136         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2137
2138         tx_ring->desc = NULL;
2139 }
2140
2141 /**
2142  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2143  * @adapter: board private structure
2144  *
2145  * Free all transmit software resources
2146  **/
2147
2148 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
2149 {
2150         int i;
2151
2152         for (i = 0; i < adapter->num_tx_queues; i++)
2153                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
2154 }
2155
2156 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2157                                              struct e1000_buffer *buffer_info)
2158 {
2159         if (buffer_info->dma) {
2160                 pci_unmap_page(adapter->pdev,
2161                                 buffer_info->dma,
2162                                 buffer_info->length,
2163                                 PCI_DMA_TODEVICE);
2164                 buffer_info->dma = 0;
2165         }
2166         if (buffer_info->skb) {
2167                 dev_kfree_skb_any(buffer_info->skb);
2168                 buffer_info->skb = NULL;
2169         }
2170         /* buffer_info must be completely set up in the transmit path */
2171 }
2172
2173 /**
2174  * e1000_clean_tx_ring - Free Tx Buffers
2175  * @adapter: board private structure
2176  * @tx_ring: ring to be cleaned
2177  **/
2178
2179 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2180                                 struct e1000_tx_ring *tx_ring)
2181 {
2182         struct e1000_hw *hw = &adapter->hw;
2183         struct e1000_buffer *buffer_info;
2184         unsigned long size;
2185         unsigned int i;
2186
2187         /* Free all the Tx ring sk_buffs */
2188
2189         for (i = 0; i < tx_ring->count; i++) {
2190                 buffer_info = &tx_ring->buffer_info[i];
2191                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2192         }
2193
2194         size = sizeof(struct e1000_buffer) * tx_ring->count;
2195         memset(tx_ring->buffer_info, 0, size);
2196
2197         /* Zero out the descriptor ring */
2198
2199         memset(tx_ring->desc, 0, tx_ring->size);
2200
2201         tx_ring->next_to_use = 0;
2202         tx_ring->next_to_clean = 0;
2203         tx_ring->last_tx_tso = 0;
2204
2205         writel(0, hw->hw_addr + tx_ring->tdh);
2206         writel(0, hw->hw_addr + tx_ring->tdt);
2207 }
2208
2209 /**
2210  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2211  * @adapter: board private structure
2212  **/
2213
2214 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2215 {
2216         int i;
2217
2218         for (i = 0; i < adapter->num_tx_queues; i++)
2219                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2220 }
2221
2222 /**
2223  * e1000_free_rx_resources - Free Rx Resources
2224  * @adapter: board private structure
2225  * @rx_ring: ring to clean the resources from
2226  *
2227  * Free all receive software resources
2228  **/
2229
2230 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2231                                     struct e1000_rx_ring *rx_ring)
2232 {
2233         struct pci_dev *pdev = adapter->pdev;
2234
2235         e1000_clean_rx_ring(adapter, rx_ring);
2236
2237         vfree(rx_ring->buffer_info);
2238         rx_ring->buffer_info = NULL;
2239         kfree(rx_ring->ps_page);
2240         rx_ring->ps_page = NULL;
2241         kfree(rx_ring->ps_page_dma);
2242         rx_ring->ps_page_dma = NULL;
2243
2244         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2245
2246         rx_ring->desc = NULL;
2247 }
2248
2249 /**
2250  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2251  * @adapter: board private structure
2252  *
2253  * Free all receive software resources
2254  **/
2255
2256 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2257 {
2258         int i;
2259
2260         for (i = 0; i < adapter->num_rx_queues; i++)
2261                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2262 }
2263
2264 /**
2265  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2266  * @adapter: board private structure
2267  * @rx_ring: ring to free buffers from
2268  **/
2269
2270 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2271                                 struct e1000_rx_ring *rx_ring)
2272 {
2273         struct e1000_hw *hw = &adapter->hw;
2274         struct e1000_buffer *buffer_info;
2275         struct e1000_ps_page *ps_page;
2276         struct e1000_ps_page_dma *ps_page_dma;
2277         struct pci_dev *pdev = adapter->pdev;
2278         unsigned long size;
2279         unsigned int i, j;
2280
2281         /* Free all the Rx ring sk_buffs */
2282         for (i = 0; i < rx_ring->count; i++) {
2283                 buffer_info = &rx_ring->buffer_info[i];
2284                 if (buffer_info->skb) {
2285                         pci_unmap_single(pdev,
2286                                          buffer_info->dma,
2287                                          buffer_info->length,
2288                                          PCI_DMA_FROMDEVICE);
2289
2290                         dev_kfree_skb(buffer_info->skb);
2291                         buffer_info->skb = NULL;
2292                 }
2293                 ps_page = &rx_ring->ps_page[i];
2294                 ps_page_dma = &rx_ring->ps_page_dma[i];
2295                 for (j = 0; j < adapter->rx_ps_pages; j++) {
2296                         if (!ps_page->ps_page[j]) break;
2297                         pci_unmap_page(pdev,
2298                                        ps_page_dma->ps_page_dma[j],
2299                                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2300                         ps_page_dma->ps_page_dma[j] = 0;
2301                         put_page(ps_page->ps_page[j]);
2302                         ps_page->ps_page[j] = NULL;
2303                 }
2304         }
2305
2306         size = sizeof(struct e1000_buffer) * rx_ring->count;
2307         memset(rx_ring->buffer_info, 0, size);
2308         size = sizeof(struct e1000_ps_page) * rx_ring->count;
2309         memset(rx_ring->ps_page, 0, size);
2310         size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2311         memset(rx_ring->ps_page_dma, 0, size);
2312
2313         /* Zero out the descriptor ring */
2314
2315         memset(rx_ring->desc, 0, rx_ring->size);
2316
2317         rx_ring->next_to_clean = 0;
2318         rx_ring->next_to_use = 0;
2319
2320         writel(0, hw->hw_addr + rx_ring->rdh);
2321         writel(0, hw->hw_addr + rx_ring->rdt);
2322 }
2323
2324 /**
2325  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2326  * @adapter: board private structure
2327  **/
2328
2329 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2330 {
2331         int i;
2332
2333         for (i = 0; i < adapter->num_rx_queues; i++)
2334                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2335 }
2336
2337 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2338  * and memory write and invalidate disabled for certain operations
2339  */
2340 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2341 {
2342         struct e1000_hw *hw = &adapter->hw;
2343         struct net_device *netdev = adapter->netdev;
2344         u32 rctl;
2345
2346         e1000_pci_clear_mwi(hw);
2347
2348         rctl = er32(RCTL);
2349         rctl |= E1000_RCTL_RST;
2350         ew32(RCTL, rctl);
2351         E1000_WRITE_FLUSH();
2352         mdelay(5);
2353
2354         if (netif_running(netdev))
2355                 e1000_clean_all_rx_rings(adapter);
2356 }
2357
2358 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2359 {
2360         struct e1000_hw *hw = &adapter->hw;
2361         struct net_device *netdev = adapter->netdev;
2362         u32 rctl;
2363
2364         rctl = er32(RCTL);
2365         rctl &= ~E1000_RCTL_RST;
2366         ew32(RCTL, rctl);
2367         E1000_WRITE_FLUSH();
2368         mdelay(5);
2369
2370         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2371                 e1000_pci_set_mwi(hw);
2372
2373         if (netif_running(netdev)) {
2374                 /* No need to loop, because 82542 supports only 1 queue */
2375                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2376                 e1000_configure_rx(adapter);
2377                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2378         }
2379 }
2380
2381 /**
2382  * e1000_set_mac - Change the Ethernet Address of the NIC
2383  * @netdev: network interface device structure
2384  * @p: pointer to an address structure
2385  *
2386  * Returns 0 on success, negative on failure
2387  **/
2388
2389 static int e1000_set_mac(struct net_device *netdev, void *p)
2390 {
2391         struct e1000_adapter *adapter = netdev_priv(netdev);
2392         struct e1000_hw *hw = &adapter->hw;
2393         struct sockaddr *addr = p;
2394
2395         if (!is_valid_ether_addr(addr->sa_data))
2396                 return -EADDRNOTAVAIL;
2397
2398         /* 82542 2.0 needs to be in reset to write receive address registers */
2399
2400         if (hw->mac_type == e1000_82542_rev2_0)
2401                 e1000_enter_82542_rst(adapter);
2402
2403         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2404         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2405
2406         e1000_rar_set(hw, hw->mac_addr, 0);
2407
2408         /* With 82571 controllers, LAA may be overwritten (with the default)
2409          * due to controller reset from the other port. */
2410         if (hw->mac_type == e1000_82571) {
2411                 /* activate the workaround */
2412                 hw->laa_is_present = 1;
2413
2414                 /* Hold a copy of the LAA in RAR[14].  This is done so that
2415                  * between the time RAR[0] gets clobbered and the time it
2416                  * gets fixed (in e1000_watchdog), the actual LAA is in one
2417                  * of the RARs and no incoming packets directed to this port
2418                  * are dropped.  Eventually the LAA will be in RAR[0] and
2419                  * RAR[14] */
2420                 e1000_rar_set(hw, hw->mac_addr,
2421                                         E1000_RAR_ENTRIES - 1);
2422         }
2423
2424         if (hw->mac_type == e1000_82542_rev2_0)
2425                 e1000_leave_82542_rst(adapter);
2426
2427         return 0;
2428 }
2429
2430 /**
2431  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2432  * @netdev: network interface device structure
2433  *
2434  * The set_rx_mode entry point is called whenever the unicast or multicast
2435  * address lists or the network interface flags are updated. This routine is
2436  * responsible for configuring the hardware for proper unicast, multicast,
2437  * promiscuous mode, and all-multi behavior.
2438  **/
2439
2440 static void e1000_set_rx_mode(struct net_device *netdev)
2441 {
2442         struct e1000_adapter *adapter = netdev_priv(netdev);
2443         struct e1000_hw *hw = &adapter->hw;
2444         struct dev_addr_list *uc_ptr;
2445         struct dev_addr_list *mc_ptr;
2446         u32 rctl;
2447         u32 hash_value;
2448         int i, rar_entries = E1000_RAR_ENTRIES;
2449         int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2450                                 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2451                                 E1000_NUM_MTA_REGISTERS;
2452
2453         if (hw->mac_type == e1000_ich8lan)
2454                 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2455
2456         /* reserve RAR[14] for the LAA overwrite workaround */
2457         if (hw->mac_type == e1000_82571)
2458                 rar_entries--;
2459
2460         /* Check for Promiscuous and All Multicast modes */
2461
2462         rctl = er32(RCTL);
2463
2464         if (netdev->flags & IFF_PROMISC) {
2465                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2466                 rctl &= ~E1000_RCTL_VFE;
2467         } else {
2468                 if (netdev->flags & IFF_ALLMULTI) {
2469                         rctl |= E1000_RCTL_MPE;
2470                 } else {
2471                         rctl &= ~E1000_RCTL_MPE;
2472                 }
2473                 if (adapter->hw.mac_type != e1000_ich8lan)
2474                         rctl |= E1000_RCTL_VFE;
2475         }
2476
2477         uc_ptr = NULL;
2478         if (netdev->uc_count > rar_entries - 1) {
2479                 rctl |= E1000_RCTL_UPE;
2480         } else if (!(netdev->flags & IFF_PROMISC)) {
2481                 rctl &= ~E1000_RCTL_UPE;
2482                 uc_ptr = netdev->uc_list;
2483         }
2484
2485         ew32(RCTL, rctl);
2486
2487         /* 82542 2.0 needs to be in reset to write receive address registers */
2488
2489         if (hw->mac_type == e1000_82542_rev2_0)
2490                 e1000_enter_82542_rst(adapter);
2491
2492         /* Load the first 14 addresses into the exact filters 1-14.  Unicast
2493          * addresses take precedence, to avoid disabling unicast filtering
2494          * when possible.
2495          *
2496          * RAR 0 is used for the station MAC address; if there are fewer
2497          * than 14 addresses, go ahead and clear the remaining filters.
2498          * With 82571 controllers only entries 0-13 are filled here (RAR[14]
2499          * is reserved above). */
2500         mc_ptr = netdev->mc_list;
2501
2502         for (i = 1; i < rar_entries; i++) {
2503                 if (uc_ptr) {
2504                         e1000_rar_set(hw, uc_ptr->da_addr, i);
2505                         uc_ptr = uc_ptr->next;
2506                 } else if (mc_ptr) {
2507                         e1000_rar_set(hw, mc_ptr->da_addr, i);
2508                         mc_ptr = mc_ptr->next;
2509                 } else {
2510                         E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2511                         E1000_WRITE_FLUSH();
2512                         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2513                         E1000_WRITE_FLUSH();
2514                 }
2515         }
2516         WARN_ON(uc_ptr != NULL);
2517
2518         /* clear the old settings from the multicast hash table */
2519
2520         for (i = 0; i < mta_reg_count; i++) {
2521                 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2522                 E1000_WRITE_FLUSH();
2523         }
2524
2525         /* load any remaining addresses into the hash table */
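        /* each address hashes to one bit in the MTA (mta_reg_count
         * 32-bit registers); hash collisions mean the filter can pass
         * extra multicasts, which the stack then discards */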
2526
2527         for (; mc_ptr; mc_ptr = mc_ptr->next) {
2528                 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
2529                 e1000_mta_set(hw, hash_value);
2530         }
2531
2532         if (hw->mac_type == e1000_82542_rev2_0)
2533                 e1000_leave_82542_rst(adapter);
2534 }
2535
2536 /* Need to wait a few seconds after link up to get diagnostic information from
2537  * the PHY */
2538
2539 static void e1000_update_phy_info(unsigned long data)
2540 {
2541         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2542         struct e1000_hw *hw = &adapter->hw;
2543         e1000_phy_get_info(hw, &adapter->phy_info);
2544 }
2545
2546 /**
2547  * e1000_82547_tx_fifo_stall - Timer Call-back
2548  * @data: pointer to adapter cast into an unsigned long
2549  **/
2550
2551 static void e1000_82547_tx_fifo_stall(unsigned long data)
2552 {
2553         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2554         struct e1000_hw *hw = &adapter->hw;
2555         struct net_device *netdev = adapter->netdev;
2556         u32 tctl;
2557
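        /* 82547 errata workaround: once both the descriptor ring
         * (TDH == TDT) and the on-chip FIFO head/tail pairs have
         * drained, transmits are briefly disabled while the FIFO
         * pointers are rewound to tx_head_addr, then re-enabled */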
2558         if (atomic_read(&adapter->tx_fifo_stall)) {
2559                 if ((er32(TDT) == er32(TDH)) &&
2560                    (er32(TDFT) == er32(TDFH)) &&
2561                    (er32(TDFTS) == er32(TDFHS))) {
2562                         tctl = er32(TCTL);
2563                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2564                         ew32(TDFT, adapter->tx_head_addr);
2565                         ew32(TDFH, adapter->tx_head_addr);
2566                         ew32(TDFTS, adapter->tx_head_addr);
2567                         ew32(TDFHS, adapter->tx_head_addr);
2568                         ew32(TCTL, tctl);
2569                         E1000_WRITE_FLUSH();
2570
2571                         adapter->tx_fifo_head = 0;
2572                         atomic_set(&adapter->tx_fifo_stall, 0);
2573                         netif_wake_queue(netdev);
2574                 } else {
2575                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2576                 }
2577         }
2578 }
2579
2580 /**
2581  * e1000_watchdog - Timer Call-back
2582  * @data: pointer to adapter cast into an unsigned long
2583  **/
2584 static void e1000_watchdog(unsigned long data)
2585 {
2586         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2587         struct e1000_hw *hw = &adapter->hw;
2588         struct net_device *netdev = adapter->netdev;
2589         struct e1000_tx_ring *txdr = adapter->tx_ring;
2590         u32 link, tctl;
2591         s32 ret_val;
2592
2593         ret_val = e1000_check_for_link(hw);
2594         if ((ret_val == E1000_ERR_PHY) &&
2595             (hw->phy_type == e1000_phy_igp_3) &&
2596             (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2597                 /* See e1000_kumeran_lock_loss_workaround() */
2598                 DPRINTK(LINK, INFO,
2599                         "Gigabit has been disabled, downgrading speed\n");
2600         }
2601
2602         if (hw->mac_type == e1000_82573) {
2603                 e1000_enable_tx_pkt_filtering(hw);
2604                 if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
2605                         e1000_update_mng_vlan(adapter);
2606         }
2607
2608         if ((hw->media_type == e1000_media_type_internal_serdes) &&
2609            !(er32(TXCW) & E1000_TXCW_ANE))
2610                 link = !hw->serdes_link_down;
2611         else
2612                 link = er32(STATUS) & E1000_STATUS_LU;
2613
2614         if (link) {
2615                 if (!netif_carrier_ok(netdev)) {
2616                         u32 ctrl;
2617                         bool txb2b = true;
2618                         e1000_get_speed_and_duplex(hw,
2619                                                    &adapter->link_speed,
2620                                                    &adapter->link_duplex);
2621
2622                         ctrl = er32(CTRL);
2623                         DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2624                                 "Flow Control: %s\n",
2625                                 adapter->link_speed,
2626                                 adapter->link_duplex == FULL_DUPLEX ?
2627                                 "Full Duplex" : "Half Duplex",
2628                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2629                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2630                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2631                                 E1000_CTRL_TFCE) ? "TX" : "None" )));
2632
2633                         /* tweak tx_queue_len according to speed/duplex
2634                          * and adjust the timeout factor */
2635                         netdev->tx_queue_len = adapter->tx_queue_len;
2636                         adapter->tx_timeout_factor = 1;
2637                         switch (adapter->link_speed) {
2638                         case SPEED_10:
2639                                 txb2b = false;
2640                                 netdev->tx_queue_len = 10;
2641                                 adapter->tx_timeout_factor = 8;
2642                                 break;
2643                         case SPEED_100:
2644                                 txb2b = false;
2645                                 netdev->tx_queue_len = 100;
2646                                 /* maybe add some timeout factor? */
2647                                 break;
2648                         }
2649
2650                         if ((hw->mac_type == e1000_82571 ||
2651                              hw->mac_type == e1000_82572) &&
2652                             !txb2b) {
2653                                 u32 tarc0;
2654                                 tarc0 = er32(TARC0);
2655                                 tarc0 &= ~(1 << 21);
2656                                 ew32(TARC0, tarc0);
2657                         }
2658
2659                         /* disable TSO for PCIe NICs at 10/100 speeds, to
2660                          * avoid some hardware issues */
2661                         if (!adapter->tso_force &&
2662                             hw->bus_type == e1000_bus_type_pci_express) {
2663                                 switch (adapter->link_speed) {
2664                                 case SPEED_10:
2665                                 case SPEED_100:
2666                                         DPRINTK(PROBE, INFO,
2667                                         "10/100 speed: disabling TSO\n");
2668                                         netdev->features &= ~NETIF_F_TSO;
2669                                         netdev->features &= ~NETIF_F_TSO6;
2670                                         break;
2671                                 case SPEED_1000:
2672                                         netdev->features |= NETIF_F_TSO;
2673                                         netdev->features |= NETIF_F_TSO6;
2674                                         break;
2675                                 default:
2676                                         /* oops */
2677                                         break;
2678                                 }
2679                         }
2680
2681                         /* enable transmits in the hardware, need to do this
2682                          * after setting TARC0 */
2683                         tctl = er32(TCTL);
2684                         tctl |= E1000_TCTL_EN;
2685                         ew32(TCTL, tctl);
2686
2687                         netif_carrier_on(netdev);
2688                         netif_wake_queue(netdev);
2689                         mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
2690                         adapter->smartspeed = 0;
2691                 } else {
2692                         /* make sure the receive unit is started */
2693                         if (hw->rx_needs_kicking) {
2694                                 u32 rctl = er32(RCTL);
2695                                 ew32(RCTL, rctl | E1000_RCTL_EN);
2696                         }
2697                 }
2698         } else {
2699                 if (netif_carrier_ok(netdev)) {
2700                         adapter->link_speed = 0;
2701                         adapter->link_duplex = 0;
2702                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
2703                         netif_carrier_off(netdev);
2704                         netif_stop_queue(netdev);
2705                         mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
2706
2707                         /* 80003ES2LAN workaround:
2708                          * as part of the packet buffer workaround on a
2709                          * link-down event, disable receives in the ISR
2710                          * and reset the device here in the watchdog
2711                          */
2712                         if (hw->mac_type == e1000_80003es2lan)
2713                                 /* reset device */
2714                                 schedule_work(&adapter->reset_task);
2715                 }
2716
2717                 e1000_smartspeed(adapter);
2718         }
2719
2720         e1000_update_stats(adapter);
2721
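        /* snapshot per-interval deltas; tx_packet_delta and
         * collision_delta feed the adaptive IFS logic in
         * e1000_update_adaptive() below */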
2722         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2723         adapter->tpt_old = adapter->stats.tpt;
2724         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2725         adapter->colc_old = adapter->stats.colc;
2726
2727         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2728         adapter->gorcl_old = adapter->stats.gorcl;
2729         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2730         adapter->gotcl_old = adapter->stats.gotcl;
2731
2732         e1000_update_adaptive(hw);
2733
2734         if (!netif_carrier_ok(netdev)) {
2735                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2736                         /* We've lost link, so the controller stops DMA,
2737                          * but we've got queued Tx work that's never going
2738                          * to get done, so reset controller to flush Tx.
2739                          * (Do the reset outside of interrupt context). */
2740                         adapter->tx_timeout_count++;
2741                         schedule_work(&adapter->reset_task);
2742                 }
2743         }
2744
2745         /* Cause software interrupt to ensure rx ring is cleaned */
2746         ew32(ICS, E1000_ICS_RXDMT0);
2747
2748         /* Force detection of hung controller every watchdog period */
2749         adapter->detect_tx_hung = true;
2750
2751         /* With 82571 controllers, LAA may be overwritten due to controller
2752          * reset from the other port. Set the appropriate LAA in RAR[0] */
2753         if (hw->mac_type == e1000_82571 && hw->laa_is_present)
2754                 e1000_rar_set(hw, hw->mac_addr, 0);
2755
2756         /* Reset the timer */
2757         mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
2758 }
2759
2760 enum latency_range {
2761         lowest_latency = 0,
2762         low_latency = 1,
2763         bulk_latency = 2,
2764         latency_invalid = 255
2765 };
2766
2767 /**
2768  * e1000_update_itr - update the dynamic ITR value based on statistics
2769  * @adapter: pointer to adapter
2770  * @itr_setting: current adapter->itr
2771  * @packets: the number of packets during this measurement interval
2772  * @bytes: the number of bytes during this measurement interval
2773  *
2774  *      Stores a new ITR value based on packet and byte counts during
2775  *      the last interrupt.  The advantage of per-interrupt computation
2776  *      is faster updates and a more accurate ITR for the current
2777  *      traffic pattern.  Constants in this function were computed from
2778  *      the theoretical maximum wire speed, and thresholds were set from
2779  *      testing data, aiming to minimize response time while increasing
2780  *      bulk throughput.  This functionality is controlled by the
2781  *      InterruptThrottleRate module parameter (see e1000_param.c).
2782  **/
2783 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2784                                      u16 itr_setting, int packets, int bytes)
2785 {
2786         unsigned int retval = itr_setting;
2787         struct e1000_hw *hw = &adapter->hw;
2788
2789         if (unlikely(hw->mac_type < e1000_82540))
2790                 goto update_itr_done;
2791
2792         if (packets == 0)
2793                 goto update_itr_done;
2794
2795         switch (itr_setting) {
2796         case lowest_latency:
2797                 /* jumbo frames get bulk treatment */
2798                 if (bytes/packets > 8000)
2799                         retval = bulk_latency;
2800                 else if ((packets < 5) && (bytes > 512))
2801                         retval = low_latency;
2802                 break;
2803         case low_latency:  /* 50 usec aka 20000 ints/s */
2804                 if (bytes > 10000) {
2805                         /* jumbo frames need bulk latency setting */
2806                         if (bytes/packets > 8000)
2807                                 retval = bulk_latency;
2808                         else if ((packets < 10) || ((bytes/packets) > 1200))
2809                                 retval = bulk_latency;
2810                 else if (packets > 35)
2811                                 retval = lowest_latency;
2812                 } else if (bytes/packets > 2000)
2813                         retval = bulk_latency;
2814                 else if (packets <= 2 && bytes < 512)
2815                         retval = lowest_latency;
2816                 break;
2817         case bulk_latency: /* 250 usec aka 4000 ints/s */
2818                 if (bytes > 25000) {
2819                         if (packets > 35)
2820                                 retval = low_latency;
2821                 } else if (bytes < 6000) {
2822                         retval = low_latency;
2823                 }
2824                 break;
2825         }
2826
2827 update_itr_done:
2828         return retval;
2829 }
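
/* Worked example: at low_latency, an interval with 40 packets totaling
 * 60000 bytes (bytes/packets == 1500 > 1200) moves to bulk_latency, while
 * 40 packets totaling 12000 bytes (small packets, packets > 35) moves to
 * lowest_latency. */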
2830
2831 static void e1000_set_itr(struct e1000_adapter *adapter)
2832 {
2833         struct e1000_hw *hw = &adapter->hw;
2834         u16 current_itr;
2835         u32 new_itr = adapter->itr;
2836
2837         if (unlikely(hw->mac_type < e1000_82540))
2838                 return;
2839
2840         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2841         if (unlikely(adapter->link_speed != SPEED_1000)) {
2842                 current_itr = 0;
2843                 new_itr = 4000;
2844                 goto set_itr_now;
2845         }
2846
2847         adapter->tx_itr = e1000_update_itr(adapter,
2848                                     adapter->tx_itr,
2849                                     adapter->total_tx_packets,
2850                                     adapter->total_tx_bytes);
2851         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2852         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2853                 adapter->tx_itr = low_latency;
2854
2855         adapter->rx_itr = e1000_update_itr(adapter,
2856                                     adapter->rx_itr,
2857                                     adapter->total_rx_packets,
2858                                     adapter->total_rx_bytes);
2859         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2860         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2861                 adapter->rx_itr = low_latency;
2862
2863         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2864
2865         switch (current_itr) {
2866         /* counts and packets in update_itr are dependent on these numbers */
2867         case lowest_latency:
2868                 new_itr = 70000;
2869                 break;
2870         case low_latency:
2871                 new_itr = 20000; /* aka hwitr = ~200 */
2872                 break;
2873         case bulk_latency:
2874                 new_itr = 4000;
2875                 break;
2876         default:
2877                 break;
2878         }
2879
2880 set_itr_now:
2881         if (new_itr != adapter->itr) {
2882                 /* This attempts to bias the interrupt rate towards Bulk
2883                  * by adding intermediate steps when the interrupt rate is
2884                  * increasing. */
2885                 new_itr = new_itr > adapter->itr ?
2886                              min(adapter->itr + (new_itr >> 2), new_itr) :
2887                              new_itr;
2888                 adapter->itr = new_itr;
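                /* The ITR register counts in units of 256 nanoseconds, so an
                 * interrupt rate in ints/s converts as 10^9 / (rate * 256);
                 * e.g. new_itr == 20000 programs a value of ~195, i.e. about
                 * 50us between interrupts. */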
2889                 ew32(ITR, 1000000000 / (new_itr * 256));
2890         }
2891
2893 }
2894
2895 #define E1000_TX_FLAGS_CSUM             0x00000001
2896 #define E1000_TX_FLAGS_VLAN             0x00000002
2897 #define E1000_TX_FLAGS_TSO              0x00000004
2898 #define E1000_TX_FLAGS_IPV4             0x00000008
2899 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2900 #define E1000_TX_FLAGS_VLAN_SHIFT       16
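
/* The 16-bit 802.1Q tag rides in the upper half of tx_flags: it is stored
 * as vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT in e1000_xmit_frame()
 * and recovered with E1000_TX_FLAGS_VLAN_MASK in e1000_tx_queue(). */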
2901
2902 static int e1000_tso(struct e1000_adapter *adapter,
2903                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2904 {
2905         struct e1000_context_desc *context_desc;
2906         struct e1000_buffer *buffer_info;
2907         unsigned int i;
2908         u32 cmd_length = 0;
2909         u16 ipcse = 0, tucse, mss;
2910         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2911         int err;
2912
2913         if (skb_is_gso(skb)) {
2914                 if (skb_header_cloned(skb)) {
2915                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2916                         if (err)
2917                                 return err;
2918                 }
2919
2920                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2921                 mss = skb_shinfo(skb)->gso_size;
2922                 if (skb->protocol == htons(ETH_P_IP)) {
2923                         struct iphdr *iph = ip_hdr(skb);
2924                         iph->tot_len = 0;
2925                         iph->check = 0;
2926                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2927                                                                  iph->daddr, 0,
2928                                                                  IPPROTO_TCP,
2929                                                                  0);
2930                         cmd_length = E1000_TXD_CMD_IP;
2931                         ipcse = skb_transport_offset(skb) - 1;
2932                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2933                         ipv6_hdr(skb)->payload_len = 0;
2934                         tcp_hdr(skb)->check =
2935                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2936                                                  &ipv6_hdr(skb)->daddr,
2937                                                  0, IPPROTO_TCP, 0);
2938                         ipcse = 0;
2939                 }
2940                 ipcss = skb_network_offset(skb);
2941                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2942                 tucss = skb_transport_offset(skb);
2943                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2944                 tucse = 0;
2945
2946                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2947                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2948
2949                 i = tx_ring->next_to_use;
2950                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2951                 buffer_info = &tx_ring->buffer_info[i];
2952
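                /* Context-descriptor field naming (per the assignments
                 * below): ip/tu = IP vs. TCP/UDP, cs = checksum, and the
                 * final s/o/e = start, offset, end; e.g. tucso is the byte
                 * offset at which the TCP checksum will be inserted. */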
2953                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2954                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2955                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2956                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2957                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2958                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2959                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2960                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2961                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2962
2963                 buffer_info->time_stamp = jiffies;
2964                 buffer_info->next_to_watch = i;
2965
2966                 if (++i == tx_ring->count) i = 0;
2967                 tx_ring->next_to_use = i;
2968
2969                 return true;
2970         }
2971         return false;
2972 }
2973
2974 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2975                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2976 {
2977         struct e1000_context_desc *context_desc;
2978         struct e1000_buffer *buffer_info;
2979         unsigned int i;
2980         u8 css;
2981
2982         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2983                 css = skb_transport_offset(skb);
2984
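                /* css marks where hardware checksumming starts; the checksum
                 * is inserted at tucso = css + skb->csum_offset below.  For
                 * TCP, csum_offset is 16, the offset of the check field in
                 * struct tcphdr. */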
2985                 i = tx_ring->next_to_use;
2986                 buffer_info = &tx_ring->buffer_info[i];
2987                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2988
2989                 context_desc->lower_setup.ip_config = 0;
2990                 context_desc->upper_setup.tcp_fields.tucss = css;
2991                 context_desc->upper_setup.tcp_fields.tucso =
2992                         css + skb->csum_offset;
2993                 context_desc->upper_setup.tcp_fields.tucse = 0;
2994                 context_desc->tcp_seg_setup.data = 0;
2995                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2996
2997                 buffer_info->time_stamp = jiffies;
2998                 buffer_info->next_to_watch = i;
2999
3000                 if (unlikely(++i == tx_ring->count)) i = 0;
3001                 tx_ring->next_to_use = i;
3002
3003                 return true;
3004         }
3005
3006         return false;
3007 }
3008
3009 #define E1000_MAX_TXD_PWR       12
3010 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
3011
3012 static int e1000_tx_map(struct e1000_adapter *adapter,
3013                         struct e1000_tx_ring *tx_ring,
3014                         struct sk_buff *skb, unsigned int first,
3015                         unsigned int max_per_txd, unsigned int nr_frags,
3016                         unsigned int mss)
3017 {
3018         struct e1000_hw *hw = &adapter->hw;
3019         struct e1000_buffer *buffer_info;
3020         unsigned int len = skb->len;
3021         unsigned int offset = 0, size, count = 0, i;
3022         unsigned int f;
3023         len -= skb->data_len;
3024
3025         i = tx_ring->next_to_use;
3026
3027         while (len) {
3028                 buffer_info = &tx_ring->buffer_info[i];
3029                 size = min(len, max_per_txd);
3030                 /* Workaround for Controller erratum --
3031                  * descriptor for non-tso packet in a linear SKB that follows a
3032                  * tso gets written back prematurely before the data is fully
3033                  * DMA'd to the controller */
3034                 if (!skb->data_len && tx_ring->last_tx_tso &&
3035                     !skb_is_gso(skb)) {
3036                         tx_ring->last_tx_tso = 0;
3037                         size -= 4;
3038                 }
3039
3040                 /* Workaround for premature desc write-backs
3041                  * in TSO mode.  Append 4-byte sentinel desc */
3042                 if (unlikely(mss && !nr_frags && size == len && size > 8))
3043                         size -= 4;
3044                 /* Workaround for errata 10, which applies to all
3045                  * controllers in PCI-X mode: make sure that the first
3046                  * descriptor of a packet is smaller than
3047                  * 2048 - 16 - 16 (or 2016) bytes.
3048                  */
3049                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3050                                 (size > 2015) && count == 0))
3051                         size = 2015;
3052
3053                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
3054                  * terminating buffers within evenly-aligned dwords. */
3055                 if (unlikely(adapter->pcix_82544 &&
3056                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
3057                    size > 4))
3058                         size -= 4;
3059
3060                 buffer_info->length = size;
3061                 buffer_info->dma =
3062                         pci_map_single(adapter->pdev,
3063                                 skb->data + offset,
3064                                 size,
3065                                 PCI_DMA_TODEVICE);
3066                 buffer_info->time_stamp = jiffies;
3067                 buffer_info->next_to_watch = i;
3068
3069                 len -= size;
3070                 offset += size;
3071                 count++;
3072                 if (unlikely(++i == tx_ring->count)) i = 0;
3073         }
3074
3075         for (f = 0; f < nr_frags; f++) {
3076                 struct skb_frag_struct *frag;
3077
3078                 frag = &skb_shinfo(skb)->frags[f];
3079                 len = frag->size;
3080                 offset = frag->page_offset;
3081
3082                 while (len) {
3083                         buffer_info = &tx_ring->buffer_info[i];
3084                         size = min(len, max_per_txd);
3085                         /* Workaround for premature desc write-backs
3086                          * in TSO mode.  Append 4-byte sentinel desc */
3087                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3088                                 size -= 4;
3089                         /* Workaround for potential 82544 hang in PCI-X.
3090                          * Avoid terminating buffers within evenly-aligned
3091                          * dwords. */
3092                         if (unlikely(adapter->pcix_82544 &&
3093                            !((unsigned long)(frag->page+offset+size-1) & 4) &&
3094                            size > 4))
3095                                 size -= 4;
3096
3097                         buffer_info->length = size;
3098                         buffer_info->dma =
3099                                 pci_map_page(adapter->pdev,
3100                                         frag->page,
3101                                         offset,
3102                                         size,
3103                                         PCI_DMA_TODEVICE);
3104                         buffer_info->time_stamp = jiffies;
3105                         buffer_info->next_to_watch = i;
3106
3107                         len -= size;
3108                         offset += size;
3109                         count++;
3110                         if (unlikely(++i == tx_ring->count)) i = 0;
3111                 }
3112         }
3113
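        /* The loops above leave i pointing one past the packet's last
         * descriptor; step back so the skb is recorded on the final
         * descriptor and first's next_to_watch points at it. */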
3114         i = (i == 0) ? tx_ring->count - 1 : i - 1;
3115         tx_ring->buffer_info[i].skb = skb;
3116         tx_ring->buffer_info[first].next_to_watch = i;
3117
3118         return count;
3119 }
3120
3121 static void e1000_tx_queue(struct e1000_adapter *adapter,
3122                            struct e1000_tx_ring *tx_ring, int tx_flags,
3123                            int count)
3124 {
3125         struct e1000_hw *hw = &adapter->hw;
3126         struct e1000_tx_desc *tx_desc = NULL;
3127         struct e1000_buffer *buffer_info;
3128         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3129         unsigned int i;
3130
3131         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3132                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3133                              E1000_TXD_CMD_TSE;
3134                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3135
3136                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3137                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3138         }
3139
3140         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3141                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3142                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3143         }
3144
3145         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3146                 txd_lower |= E1000_TXD_CMD_VLE;
3147                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3148         }
3149
3150         i = tx_ring->next_to_use;
3151
3152         while (count--) {
3153                 buffer_info = &tx_ring->buffer_info[i];
3154                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3155                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3156                 tx_desc->lower.data =
3157                         cpu_to_le32(txd_lower | buffer_info->length);
3158                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3159                 if (unlikely(++i == tx_ring->count)) i = 0;
3160         }
3161
3162         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3163
3164         /* Force memory writes to complete before letting h/w
3165          * know there are new descriptors to fetch.  (Only
3166          * applicable for weak-ordered memory model archs,
3167          * such as IA-64). */
3168         wmb();
3169
3170         tx_ring->next_to_use = i;
3171         writel(i, hw->hw_addr + tx_ring->tdt);
3172         /* We need this if more than one processor can write to our tail
3173          * at a time; it synchronizes IO on IA64/Altix systems. */
3174         mmiowb();
3175 }
3176
3177 /**
3178  * 82547 workaround to avoid controller hang in half-duplex environment.
3179  * The workaround is to avoid queuing a large packet that would span
3180  * the internal Tx FIFO ring boundary by notifying the stack to resend
3181  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3182  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3183  * to the beginning of the Tx FIFO.
3184  **/
3185
3186 #define E1000_FIFO_HDR                  0x10
3187 #define E1000_82547_PAD_LEN             0x3E0
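
/* FIFO accounting sketch: each packet occupies its length plus a 16-byte
 * (E1000_FIFO_HDR) header, rounded up to a 16-byte boundary; e.g. a
 * 1514-byte frame consumes ALIGN(1514 + 16, 16) == 1536 bytes of Tx FIFO. */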
3188
3189 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3190                                        struct sk_buff *skb)
3191 {
3192         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3193         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3194
3195         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3196
3197         if (adapter->link_duplex != HALF_DUPLEX)
3198                 goto no_fifo_stall_required;
3199
3200         if (atomic_read(&adapter->tx_fifo_stall))
3201                 return 1;
3202
3203         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3204                 atomic_set(&adapter->tx_fifo_stall, 1);
3205                 return 1;
3206         }
3207
3208 no_fifo_stall_required:
3209         adapter->tx_fifo_head += skb_fifo_len;
3210         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3211                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3212         return 0;
3213 }
3214
3215 #define MINIMUM_DHCP_PACKET_SIZE 282
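/* 282 is presumably the smallest frame that can carry a DHCP message:
 * 14 (Ethernet) + 20 (IP) + 8 (UDP) + 240 (BOOTP fixed fields plus the
 * magic cookie). */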
3216 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3217                                     struct sk_buff *skb)
3218 {
3219         struct e1000_hw *hw = &adapter->hw;
3220         u16 length, offset;
3221         if (vlan_tx_tag_present(skb)) {
3222                 if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
3223                         (hw->mng_cookie.status &
3224                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
3225                         return 0;
3226         }
3227         if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3228                 struct ethhdr *eth = (struct ethhdr *) skb->data;
3229                 if ((htons(ETH_P_IP) == eth->h_proto)) {
3230                         const struct iphdr *ip =
3231                                 (struct iphdr *)((u8 *)skb->data+14);
3232                         if (IPPROTO_UDP == ip->protocol) {
3233                                 struct udphdr *udp =
3234                                         (struct udphdr *)((u8 *)ip +
3235                                                 (ip->ihl << 2));
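                                /* ihl is the IP header length in 32-bit
                                 * words, so ihl << 2 converted it to bytes;
                                 * "+ 8" below skips the UDP header to reach
                                 * the DHCP payload, and 67 is the DHCP/BOOTP
                                 * server port. */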
3236                                 if (ntohs(udp->dest) == 67) {
3237                                         offset = (u8 *)udp + 8 - skb->data;
3238                                         length = skb->len - offset;
3239
3240                                         return e1000_mng_write_dhcp_info(hw,
3241                                                         (u8 *)udp + 8,
3242                                                         length);
3243                                 }
3244                         }
3245                 }
3246         }
3247         return 0;
3248 }
3249
3250 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3251 {
3252         struct e1000_adapter *adapter = netdev_priv(netdev);
3253         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3254
3255         netif_stop_queue(netdev);
3256         /* Herbert's original patch had:
3257          *  smp_mb__after_netif_stop_queue();
3258          * but since that doesn't exist yet, just open code it. */
3259         smp_mb();
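        /* This barrier pairs with the smp_mb() in e1000_clean_tx_irq()
         * ahead of netif_wake_queue(): either the cleaner sees the stopped
         * queue and wakes it, or we see the newly freed descriptors below. */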
3260
3261         /* We need to check again in a case another CPU has just
3262          * made room available. */
3263         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3264                 return -EBUSY;
3265
3266         /* A reprieve! */
3267         netif_start_queue(netdev);
3268         ++adapter->restart_queue;
3269         return 0;
3270 }
3271
3272 static int e1000_maybe_stop_tx(struct net_device *netdev,
3273                                struct e1000_tx_ring *tx_ring, int size)
3274 {
3275         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3276                 return 0;
3277         return __e1000_maybe_stop_tx(netdev, size);
3278 }
3279
3280 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
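/* e.g. a 9000-byte linear region with max_txd_pwr == 12 (4096-byte chunks)
 * needs TXD_USE_COUNT(9000, 12) == (9000 >> 12) + 1 == 3 descriptors; the
 * estimate is conservative by one when S is an exact multiple of 2^X. */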
3281 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3282 {
3283         struct e1000_adapter *adapter = netdev_priv(netdev);
3284         struct e1000_hw *hw = &adapter->hw;
3285         struct e1000_tx_ring *tx_ring;
3286         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3287         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3288         unsigned int tx_flags = 0;
3289         unsigned int len = skb->len - skb->data_len;
3290         unsigned long flags;
3291         unsigned int nr_frags;
3292         unsigned int mss;
3293         int count = 0;
3294         int tso;
3295         unsigned int f;
3296
3297         /* This goes back to the question of how to logically map a tx queue
3298          * to a flow.  Right now, performance is impacted slightly negatively
3299          * if using multiple tx queues.  If the stack breaks away from a
3300          * single qdisc implementation, we can look at this again. */
3301         tx_ring = adapter->tx_ring;
3302
3303         if (unlikely(skb->len <= 0)) {
3304                 dev_kfree_skb_any(skb);
3305                 return NETDEV_TX_OK;
3306         }
3307
3308         /* 82571 and newer don't need the workaround that limited descriptor
3309          * length to 4kB */
3310         if (hw->mac_type >= e1000_82571)
3311                 max_per_txd = 8192;
3312
3313         mss = skb_shinfo(skb)->gso_size;
3314         /* The controller does a simple calculation to
3315          * make sure there is enough room in the FIFO before
3316          * initiating the DMA for each buffer.  The calc assumes
3317          * ceil(buffer len/mss) is at most 4.  To make sure we don't
3318          * overrun the FIFO, adjust the max buffer len if mss
3319          * drops. */
3320         if (mss) {
3321                 u8 hdr_len;
3322                 max_per_txd = min(mss << 2, max_per_txd);
3323                 max_txd_pwr = fls(max_per_txd) - 1;
3324
3325                 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3326                  * points to just header, pull a few bytes of payload from
3327                  * frags into skb->data */
3328                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3329                 if (skb->data_len && hdr_len == len) {
3330                         switch (hw->mac_type) {
3331                                 unsigned int pull_size;
3332                         case e1000_82544:
3333                                 /* Make sure we have room to chop off 4 bytes,
3334                                  * and that the end alignment will work out to
3335                                  * this hardware's requirements.
3336                                  * NOTE: this is a TSO-only workaround; if the
3337                                  * end byte alignment is not correct, it moves
3338                                  * us into the next dword. */
3339                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3340                                         break;
3341                                 /* fall through */
3342                         case e1000_82571:
3343                         case e1000_82572:
3344                         case e1000_82573:
3345                         case e1000_ich8lan:
3346                                 pull_size = min((unsigned int)4, skb->data_len);
3347                                 if (!__pskb_pull_tail(skb, pull_size)) {
3348                                         DPRINTK(DRV, ERR,
3349                                                 "__pskb_pull_tail failed.\n");
3350                                         dev_kfree_skb_any(skb);
3351                                         return NETDEV_TX_OK;
3352                                 }
3353                                 len = skb->len - skb->data_len;
3354                                 break;
3355                         default:
3356                                 /* do nothing */
3357                                 break;
3358                         }
3359                 }
3360         }
3361
3362         /* reserve a descriptor for the offload context */
3363         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3364                 count++;
3365         count++;
3366
3367         /* Controller Erratum workaround */
3368         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3369                 count++;
3370
3371         count += TXD_USE_COUNT(len, max_txd_pwr);
3372
3373         if (adapter->pcix_82544)
3374                 count++;
3375
3376         /* Workaround for errata 10, which applies to all controllers
3377          * in PCI-X mode, so add one more descriptor to the count
3378          */
3379         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3380                         (len > 2015)))
3381                 count++;
3382
3383         nr_frags = skb_shinfo(skb)->nr_frags;
3384         for (f = 0; f < nr_frags; f++)
3385                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3386                                        max_txd_pwr);
3387         if (adapter->pcix_82544)
3388                 count += nr_frags;
3389
3390
3391         if (hw->tx_pkt_filtering &&
3392             (hw->mac_type == e1000_82573))
3393                 e1000_transfer_dhcp_info(adapter, skb);
3394
3395         if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
3396                 /* Collision - tell upper layer to requeue */
3397                 return NETDEV_TX_LOCKED;
3398
3399         /* need: count + 2 desc gap to keep tail from touching
3400          * head, otherwise try next time */
3401         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
3402                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3403                 return NETDEV_TX_BUSY;
3404         }
3405
3406         if (unlikely(hw->mac_type == e1000_82547)) {
3407                 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3408                         netif_stop_queue(netdev);
3409                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3410                         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3411                         return NETDEV_TX_BUSY;
3412                 }
3413         }
3414
3415         if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
3416                 tx_flags |= E1000_TX_FLAGS_VLAN;
3417                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3418         }
3419
3420         first = tx_ring->next_to_use;
3421
3422         tso = e1000_tso(adapter, tx_ring, skb);
3423         if (tso < 0) {
3424                 dev_kfree_skb_any(skb);
3425                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3426                 return NETDEV_TX_OK;
3427         }
3428
3429         if (likely(tso)) {
3430                 tx_ring->last_tx_tso = 1;
3431                 tx_flags |= E1000_TX_FLAGS_TSO;
3432         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3433                 tx_flags |= E1000_TX_FLAGS_CSUM;
3434
3435         /* The old method was to assume an IPv4 packet by default if TSO
3436          * was enabled.  82571 hardware supports TSO for IPv6 as well, so
3437          * we must check the protocol instead of assuming. */
3438         if (likely(skb->protocol == htons(ETH_P_IP)))
3439                 tx_flags |= E1000_TX_FLAGS_IPV4;
3440
3441         e1000_tx_queue(adapter, tx_ring, tx_flags,
3442                        e1000_tx_map(adapter, tx_ring, skb, first,
3443                                     max_per_txd, nr_frags, mss));
3444
3445         netdev->trans_start = jiffies;
3446
3447         /* Make sure there is space in the ring for the next send. */
3448         e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3449
3450         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3451         return NETDEV_TX_OK;
3452 }
3453
3454 /**
3455  * e1000_tx_timeout - Respond to a Tx Hang
3456  * @netdev: network interface device structure
3457  **/
3458
3459 static void e1000_tx_timeout(struct net_device *netdev)
3460 {
3461         struct e1000_adapter *adapter = netdev_priv(netdev);
3462
3463         /* Do the reset outside of interrupt context */
3464         adapter->tx_timeout_count++;
3465         schedule_work(&adapter->reset_task);
3466 }
3467
3468 static void e1000_reset_task(struct work_struct *work)
3469 {
3470         struct e1000_adapter *adapter =
3471                 container_of(work, struct e1000_adapter, reset_task);
3472
3473         e1000_reinit_locked(adapter);
3474 }
3475
3476 /**
3477  * e1000_get_stats - Get System Network Statistics
3478  * @netdev: network interface device structure
3479  *
3480  * Returns the address of the device statistics structure.
3481  * The statistics are actually updated from the timer callback.
3482  **/
3483
3484 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3485 {
3486         struct e1000_adapter *adapter = netdev_priv(netdev);
3487
3488         /* only return the current stats */
3489         return &adapter->net_stats;
3490 }
3491
3492 /**
3493  * e1000_change_mtu - Change the Maximum Transfer Unit
3494  * @netdev: network interface device structure
3495  * @new_mtu: new value for maximum frame size
3496  *
3497  * Returns 0 on success, negative on failure
3498  **/
3499
3500 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3501 {
3502         struct e1000_adapter *adapter = netdev_priv(netdev);
3503         struct e1000_hw *hw = &adapter->hw;
3504         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3505         u16 eeprom_data = 0;
3506
3507         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3508             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3509                 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3510                 return -EINVAL;
3511         }
3512
3513         /* Adapter-specific max frame size limits. */
3514         switch (hw->mac_type) {
3515         case e1000_undefined ... e1000_82542_rev2_1:
3516         case e1000_ich8lan:
3517                 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3518                         DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3519                         return -EINVAL;
3520                 }
3521                 break;
3522         case e1000_82573:
3523                 /* Jumbo Frames not supported if:
3524                  * - this is not an 82573L device
3525                  * - ASPM is enabled in any way (0x1A bits 3:2) */
3526                 e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
3527                                   &eeprom_data);
3528                 if ((hw->device_id != E1000_DEV_ID_82573L) ||
3529                     (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3530                         if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3531                                 DPRINTK(PROBE, ERR,
3532                                         "Jumbo Frames not supported.\n");
3533                                 return -EINVAL;
3534                         }
3535                         break;
3536                 }
3537                 /* ERT will be enabled later to enable wire speed receives */
3538
3539                 /* fall through to get support */
3540         case e1000_82571:
3541         case e1000_82572:
3542         case e1000_80003es2lan:
3543 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3544                 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3545                         DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3546                         return -EINVAL;
3547                 }
3548                 break;
3549         default:
3550                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3551                 break;
3552         }
3553
3554         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3555          * means we reserve 2 more; this pushes us to allocate from the next
3556          * larger slab size,
3557          * e.g. RXBUFFER_2048 --> size-4096 slab */
3558
3559         if (max_frame <= E1000_RXBUFFER_256)
3560                 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3561         else if (max_frame <= E1000_RXBUFFER_512)
3562                 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3563         else if (max_frame <= E1000_RXBUFFER_1024)
3564                 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3565         else if (max_frame <= E1000_RXBUFFER_2048)
3566                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3567         else if (max_frame <= E1000_RXBUFFER_4096)
3568                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3569         else if (max_frame <= E1000_RXBUFFER_8192)
3570                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3571         else if (max_frame <= E1000_RXBUFFER_16384)
3572                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
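        /* e.g. new_mtu == 9000 gives max_frame == 9018 and selects the
         * E1000_RXBUFFER_16384 bucket above. */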
3573
3574         /* adjust allocation if LPE protects us, and we aren't using SBP */
3575         if (!hw->tbi_compatibility_on &&
3576             ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3577              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3578                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3579
3580         netdev->mtu = new_mtu;
3581         hw->max_frame_size = max_frame;
3582
3583         if (netif_running(netdev))
3584                 e1000_reinit_locked(adapter);
3585
3586         return 0;
3587 }
3588
3589 /**
3590  * e1000_update_stats - Update the board statistics counters
3591  * @adapter: board private structure
3592  **/
3593
3594 void e1000_update_stats(struct e1000_adapter *adapter)
3595 {
3596         struct e1000_hw *hw = &adapter->hw;
3597         struct pci_dev *pdev = adapter->pdev;
3598         unsigned long flags;
3599         u16 phy_tmp;
3600
3601 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3602
3603         /*
3604          * Prevent stats update while adapter is being reset, or if the pci
3605          * connection is down.
3606          */
3607         if (adapter->link_speed == 0)
3608                 return;
3609         if (pci_channel_offline(pdev))
3610                 return;
3611
3612         spin_lock_irqsave(&adapter->stats_lock, flags);
3613
3614         /* these counters are modified from e1000_tbi_adjust_stats,
3615          * called from the interrupt context, so they must only
3616          * be written while holding adapter->stats_lock
3617          */
3618
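        /* The hardware statistics registers are clear-on-read, so each
         * er32() below returns the delta since the previous update and is
         * accumulated into the running totals. */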
3619         adapter->stats.crcerrs += er32(CRCERRS);
3620         adapter->stats.gprc += er32(GPRC);
3621         adapter->stats.gorcl += er32(GORCL);
3622         adapter->stats.gorch += er32(GORCH);
3623         adapter->stats.bprc += er32(BPRC);
3624         adapter->stats.mprc += er32(MPRC);
3625         adapter->stats.roc += er32(ROC);
3626
3627         if (hw->mac_type != e1000_ich8lan) {
3628                 adapter->stats.prc64 += er32(PRC64);
3629                 adapter->stats.prc127 += er32(PRC127);
3630                 adapter->stats.prc255 += er32(PRC255);
3631                 adapter->stats.prc511 += er32(PRC511);
3632                 adapter->stats.prc1023 += er32(PRC1023);
3633                 adapter->stats.prc1522 += er32(PRC1522);
3634         }
3635
3636         adapter->stats.symerrs += er32(SYMERRS);
3637         adapter->stats.mpc += er32(MPC);
3638         adapter->stats.scc += er32(SCC);
3639         adapter->stats.ecol += er32(ECOL);
3640         adapter->stats.mcc += er32(MCC);
3641         adapter->stats.latecol += er32(LATECOL);
3642         adapter->stats.dc += er32(DC);
3643         adapter->stats.sec += er32(SEC);
3644         adapter->stats.rlec += er32(RLEC);
3645         adapter->stats.xonrxc += er32(XONRXC);
3646         adapter->stats.xontxc += er32(XONTXC);
3647         adapter->stats.xoffrxc += er32(XOFFRXC);
3648         adapter->stats.xofftxc += er32(XOFFTXC);
3649         adapter->stats.fcruc += er32(FCRUC);
3650         adapter->stats.gptc += er32(GPTC);
3651         adapter->stats.gotcl += er32(GOTCL);
3652         adapter->stats.gotch += er32(GOTCH);
3653         adapter->stats.rnbc += er32(RNBC);
3654         adapter->stats.ruc += er32(RUC);
3655         adapter->stats.rfc += er32(RFC);
3656         adapter->stats.rjc += er32(RJC);
3657         adapter->stats.torl += er32(TORL);
3658         adapter->stats.torh += er32(TORH);
3659         adapter->stats.totl += er32(TOTL);
3660         adapter->stats.toth += er32(TOTH);
3661         adapter->stats.tpr += er32(TPR);
3662
3663         if (hw->mac_type != e1000_ich8lan) {
3664                 adapter->stats.ptc64 += er32(PTC64);
3665                 adapter->stats.ptc127 += er32(PTC127);
3666                 adapter->stats.ptc255 += er32(PTC255);
3667                 adapter->stats.ptc511 += er32(PTC511);
3668                 adapter->stats.ptc1023 += er32(PTC1023);
3669                 adapter->stats.ptc1522 += er32(PTC1522);
3670         }
3671
3672         adapter->stats.mptc += er32(MPTC);
3673         adapter->stats.bptc += er32(BPTC);
3674
3675         /* used for adaptive IFS */
3676
3677         hw->tx_packet_delta = er32(TPT);
3678         adapter->stats.tpt += hw->tx_packet_delta;
3679         hw->collision_delta = er32(COLC);
3680         adapter->stats.colc += hw->collision_delta;
3681
3682         if (hw->mac_type >= e1000_82543) {
3683                 adapter->stats.algnerrc += er32(ALGNERRC);
3684                 adapter->stats.rxerrc += er32(RXERRC);
3685                 adapter->stats.tncrs += er32(TNCRS);
3686                 adapter->stats.cexterr += er32(CEXTERR);
3687                 adapter->stats.tsctc += er32(TSCTC);
3688                 adapter->stats.tsctfc += er32(TSCTFC);
3689         }
3690         if (hw->mac_type > e1000_82547_rev_2) {
3691                 adapter->stats.iac += er32(IAC);
3692                 adapter->stats.icrxoc += er32(ICRXOC);
3693
3694                 if (hw->mac_type != e1000_ich8lan) {
3695                         adapter->stats.icrxptc += er32(ICRXPTC);
3696                         adapter->stats.icrxatc += er32(ICRXATC);
3697                         adapter->stats.ictxptc += er32(ICTXPTC);
3698                         adapter->stats.ictxatc += er32(ICTXATC);
3699                         adapter->stats.ictxqec += er32(ICTXQEC);
3700                         adapter->stats.ictxqmtc += er32(ICTXQMTC);
3701                         adapter->stats.icrxdmtc += er32(ICRXDMTC);
3702                 }
3703         }
3704
3705         /* Fill out the OS statistics structure */
3706         adapter->net_stats.multicast = adapter->stats.mprc;
3707         adapter->net_stats.collisions = adapter->stats.colc;
3708
3709         /* Rx Errors */
3710
3711         /* RLEC on some newer hardware can be incorrect, so build
3712          * our own version based on RUC and ROC */
3713         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3714                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3715                 adapter->stats.ruc + adapter->stats.roc +
3716                 adapter->stats.cexterr;
3717         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3718         adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
3719         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3720         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3721         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3722
3723         /* Tx Errors */
3724         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3725         adapter->net_stats.tx_errors = adapter->stats.txerrc;
3726         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3727         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3728         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3729         if (hw->bad_tx_carr_stats_fd &&
3730             adapter->link_duplex == FULL_DUPLEX) {
3731                 adapter->net_stats.tx_carrier_errors = 0;
3732                 adapter->stats.tncrs = 0;
3733         }
3734
3735         /* Tx Dropped needs to be maintained elsewhere */
3736
3737         /* Phy Stats */
3738         if (hw->media_type == e1000_media_type_copper) {
3739                 if ((adapter->link_speed == SPEED_1000) &&
3740                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3741                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3742                         adapter->phy_stats.idle_errors += phy_tmp;
3743                 }
3744
3745                 if ((hw->mac_type <= e1000_82546) &&
3746                    (hw->phy_type == e1000_phy_m88) &&
3747                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3748                         adapter->phy_stats.receive_errors += phy_tmp;
3749         }
3750
3751         /* Management Stats */
3752         if (hw->has_smbus) {
3753                 adapter->stats.mgptc += er32(MGTPTC);
3754                 adapter->stats.mgprc += er32(MGTPRC);
3755                 adapter->stats.mgpdc += er32(MGTPDC);
3756         }
3757
3758         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3759 }
3760
3761 /**
3762  * e1000_intr_msi - Interrupt Handler
3763  * @irq: interrupt number
3764  * @data: pointer to a network interface device structure
3765  **/
3766
3767 static irqreturn_t e1000_intr_msi(int irq, void *data)
3768 {
3769         struct net_device *netdev = data;
3770         struct e1000_adapter *adapter = netdev_priv(netdev);
3771         struct e1000_hw *hw = &adapter->hw;
3772 #ifndef CONFIG_E1000_NAPI
3773         int i;
3774 #endif
3775         u32 icr = er32(ICR);
3776
3777         /* in NAPI mode, reading ICR disables interrupts using IAM */
3778
3779         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3780                 hw->get_link_status = 1;
3781                 /* 80003ES2LAN packet-buffer workaround: on a link-down
3782                  * event, disable receives here in the ISR and reset the
3783                  * adapter in the watchdog */
3784                 if (netif_carrier_ok(netdev) &&
3785                     (hw->mac_type == e1000_80003es2lan)) {
3786                         /* disable receives */
3787                         u32 rctl = er32(RCTL);
3788                         ew32(RCTL, rctl & ~E1000_RCTL_EN);
3789                 }
3790                 /* guard against interrupt when we're going down */
3791                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3792                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3793         }
3794
3795 #ifdef CONFIG_E1000_NAPI
3796         if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
3797                 adapter->total_tx_bytes = 0;
3798                 adapter->total_tx_packets = 0;
3799                 adapter->total_rx_bytes = 0;
3800                 adapter->total_rx_packets = 0;
3801                 __netif_rx_schedule(netdev, &adapter->napi);
3802         } else
3803                 e1000_irq_enable(adapter);
3804 #else
3805         adapter->total_tx_bytes = 0;
3806         adapter->total_rx_bytes = 0;
3807         adapter->total_tx_packets = 0;
3808         adapter->total_rx_packets = 0;
3809
3810         for (i = 0; i < E1000_MAX_INTR; i++)
3811                 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3812                    !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3813                         break;
3814
3815         if (likely(adapter->itr_setting & 3))
3816                 e1000_set_itr(adapter);
3817 #endif
3818
3819         return IRQ_HANDLED;
3820 }
3821
3822 /**
3823  * e1000_intr - Interrupt Handler
3824  * @irq: interrupt number
3825  * @data: pointer to a network interface device structure
3826  **/
3827
3828 static irqreturn_t e1000_intr(int irq, void *data)
3829 {
3830         struct net_device *netdev = data;
3831         struct e1000_adapter *adapter = netdev_priv(netdev);
3832         struct e1000_hw *hw = &adapter->hw;
3833         u32 rctl, icr = er32(ICR);
3834 #ifndef CONFIG_E1000_NAPI
3835         int i;
3836 #endif
3837         if (unlikely(!icr))
3838                 return IRQ_NONE;  /* Not our interrupt */
3839
3840 #ifdef CONFIG_E1000_NAPI
3841         /* IMS will not auto-mask if INT_ASSERTED is not set; in that
3842          * case the adapter didn't send the interrupt */
3843         if (unlikely(hw->mac_type >= e1000_82571 &&
3844                      !(icr & E1000_ICR_INT_ASSERTED)))
3845                 return IRQ_NONE;
3846
3847         /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
3848          * need for the IMC write */
3849 #endif
3850
3851         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3852                 hw->get_link_status = 1;
3853                 /* 80003ES2LAN packet-buffer workaround:
3854                  * on a link-down event, disable receives here
3855                  * in the ISR and reset the adapter in the
3856                  * watchdog.
3857                  */
3858                 if (netif_carrier_ok(netdev) &&
3859                     (hw->mac_type == e1000_80003es2lan)) {
3860                         /* disable receives */
3861                         rctl = er32(RCTL);
3862                         ew32(RCTL, rctl & ~E1000_RCTL_EN);
3863                 }
3864                 /* guard against interrupt when we're going down */
3865                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3866                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3867         }
3868
3869 #ifdef CONFIG_E1000_NAPI
3870         if (unlikely(hw->mac_type < e1000_82571)) {
3871                 /* disable interrupts, without the synchronize_irq bit */
3872                 ew32(IMC, ~0);
3873                 E1000_WRITE_FLUSH();
3874         }
3875         if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
3876                 adapter->total_tx_bytes = 0;
3877                 adapter->total_tx_packets = 0;
3878                 adapter->total_rx_bytes = 0;
3879                 adapter->total_rx_packets = 0;
3880                 __netif_rx_schedule(netdev, &adapter->napi);
3881         } else
3882                 /* This really should not happen!  If it does, it is basically
3883                  * a bug, but not a hard error, so enable ints and continue */
3884                 e1000_irq_enable(adapter);
3885 #else
3886         /* Writing IMC and IMS is needed for 82547.
3887          * Due to Hub Link bus being occupied, an interrupt
3888          * de-assertion message is not able to be sent.
3889          * When an interrupt assertion message is generated later,
3890          * two messages are re-ordered and sent out.
3891          * That causes the APIC to think 82547 is in de-assertion
3892          * state, while 82547 is in assertion state, resulting
3893          * in deadlock. Writing IMC forces 82547 into
3894          * de-assertion state.
3895          */
3896         if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3897                 ew32(IMC, ~0);
3898
3899         adapter->total_tx_bytes = 0;
3900         adapter->total_rx_bytes = 0;
3901         adapter->total_tx_packets = 0;
3902         adapter->total_rx_packets = 0;
3903
3904         for (i = 0; i < E1000_MAX_INTR; i++)
3905                 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3906                    !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3907                         break;
3908
3909         if (likely(adapter->itr_setting & 3))
3910                 e1000_set_itr(adapter);
3911
3912         if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3913                 e1000_irq_enable(adapter);
3914
3915 #endif
3916         return IRQ_HANDLED;
3917 }
3918
3919 #ifdef CONFIG_E1000_NAPI
3920 /**
3921  * e1000_clean - NAPI Rx polling callback
3922  * @adapter: board private structure
3923  **/
3924
3925 static int e1000_clean(struct napi_struct *napi, int budget)
3926 {
3927         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3928         struct net_device *poll_dev = adapter->netdev;
3929         int tx_cleaned = 0, work_done = 0;
3930
3931         /* Must NOT use netdev_priv macro here. */
3932         adapter = poll_dev->priv;
3933
3934         /* e1000_clean is called per-cpu.  This lock protects
3935          * tx_ring[0] from being cleaned by multiple cpus
3936          * simultaneously.  A failure obtaining the lock means
3937          * tx_ring[0] is currently being cleaned anyway. */
3938         if (spin_trylock(&adapter->tx_queue_lock)) {
3939                 tx_cleaned = e1000_clean_tx_irq(adapter,
3940                                                 &adapter->tx_ring[0]);
3941                 spin_unlock(&adapter->tx_queue_lock);
3942         }
3943
3944         adapter->clean_rx(adapter, &adapter->rx_ring[0],
3945                           &work_done, budget);
3946
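        /* Any completed Tx work consumes the whole budget so NAPI keeps
         * polling; only a fully idle pass falls through to the exit below. */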
3947         if (tx_cleaned)
3948                 work_done = budget;
3949
3950         /* If budget not fully consumed, exit the polling mode */
3951         if (work_done < budget) {
3952                 if (likely(adapter->itr_setting & 3))
3953                         e1000_set_itr(adapter);
3954                 netif_rx_complete(poll_dev, napi);
3955                 e1000_irq_enable(adapter);
3956         }
3957
3958         return work_done;
3959 }
3960
3961 #endif
3962 /**
3963  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3964  * @adapter: board private structure
3965  **/
3966
3967 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3968                                struct e1000_tx_ring *tx_ring)
3969 {
3970         struct e1000_hw *hw = &adapter->hw;
3971         struct net_device *netdev = adapter->netdev;
3972         struct e1000_tx_desc *tx_desc, *eop_desc;
3973         struct e1000_buffer *buffer_info;
3974         unsigned int i, eop;
3975 #ifdef CONFIG_E1000_NAPI
3976         unsigned int count = 0;
3977 #endif
3978         bool cleaned = false;
3979         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3980
3981         i = tx_ring->next_to_clean;
3982         eop = tx_ring->buffer_info[i].next_to_watch;
3983         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3984
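        /* next_to_watch, recorded at map time on the packet's first buffer,
         * indexes the packet's last descriptor; once hardware sets DD there,
         * every descriptor of that packet can be reclaimed. */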
3985         while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3986                 for (cleaned = false; !cleaned; ) {
3987                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3988                         buffer_info = &tx_ring->buffer_info[i];
3989                         cleaned = (i == eop);
3990
3991                         if (cleaned) {
3992                                 struct sk_buff *skb = buffer_info->skb;
3993                                 unsigned int segs, bytecount;
3994                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
3995                                 /* multiply data chunks by size of headers */
3996                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
3997                                             skb->len;
3998                                 total_tx_packets += segs;
3999                                 total_tx_bytes += bytecount;
4000                         }
4001                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
4002                         tx_desc->upper.data = 0;
4003
4004                         if (unlikely(++i == tx_ring->count)) i = 0;
4005                 }
4006
4007                 eop = tx_ring->buffer_info[i].next_to_watch;
4008                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
4009 #ifdef CONFIG_E1000_NAPI
4010 #define E1000_TX_WEIGHT 64
4011                 /* weight of a sort for tx, to avoid endless transmit cleanup */
4012                 if (count++ == E1000_TX_WEIGHT) break;
4013 #endif
4014         }
4015
4016         tx_ring->next_to_clean = i;
4017
4018 #define TX_WAKE_THRESHOLD 32
4019         if (unlikely(cleaned && netif_carrier_ok(netdev) &&
4020                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
4021                 /* Make sure that anybody stopping the queue after this
4022                  * sees the new next_to_clean.
4023                  */
4024                 smp_mb();
4025                 if (netif_queue_stopped(netdev)) {
4026                         netif_wake_queue(netdev);
4027                         ++adapter->restart_queue;
4028                 }
4029         }
4030
4031         if (adapter->detect_tx_hung) {
4032                 /* Detect a transmit hang in hardware; this serializes the
4033                  * check with the clearing of time_stamp and movement of i */
4034                 adapter->detect_tx_hung = false;
4035                 if (tx_ring->buffer_info[eop].dma &&
4036                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
4037                                (adapter->tx_timeout_factor * HZ))
4038                     && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
4039
4040                         /* detected Tx unit hang */
4041                         DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
4042                                         "  Tx Queue             <%lu>\n"
4043                                         "  TDH                  <%x>\n"
4044                                         "  TDT                  <%x>\n"
4045                                         "  next_to_use          <%x>\n"
4046                                         "  next_to_clean        <%x>\n"
4047                                         "buffer_info[next_to_clean]\n"
4048                                         "  time_stamp           <%lx>\n"
4049                                         "  next_to_watch        <%x>\n"
4050                                         "  jiffies              <%lx>\n"
4051                                         "  next_to_watch.status <%x>\n",
4052                                 (unsigned long)(tx_ring -
4053                                         adapter->tx_ring),
4054                                 readl(hw->hw_addr + tx_ring->tdh),
4055                                 readl(hw->hw_addr + tx_ring->tdt),
4056                                 tx_ring->next_to_use,
4057                                 tx_ring->next_to_clean,
4058                                 tx_ring->buffer_info[eop].time_stamp,
4059                                 eop,
4060                                 jiffies,
4061                                 eop_desc->upper.fields.status);
4062                         netif_stop_queue(netdev);
4063                 }
4064         }
4065         adapter->total_tx_bytes += total_tx_bytes;
4066         adapter->total_tx_packets += total_tx_packets;
4067         adapter->net_stats.tx_bytes += total_tx_bytes;
4068         adapter->net_stats.tx_packets += total_tx_packets;
4069         return cleaned;
4070 }
4071
4072 /**
4073  * e1000_rx_checksum - Receive Checksum Offload for 82543
4074  * @adapter:     board private structure
4075  * @status_err:  receive descriptor status and error fields
4076  * @csum:        receive descriptor csum field
4077  * @skb:         socket buffer with received data
4078  **/
4079
4080 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
4081                               u32 csum, struct sk_buff *skb)
4082 {
4083         struct e1000_hw *hw = &adapter->hw;
4084         u16 status = (u16)status_err;
4085         u8 errors = (u8)(status_err >> 24);
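        /* callers pack the descriptor's status field into the low 16
         * bits of status_err and its errors field into bits 31:24 */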
4086         skb->ip_summed = CHECKSUM_NONE;
4087
4088         /* 82543 or newer only */
4089         if (unlikely(hw->mac_type < e1000_82543)) return;
4090         /* Ignore Checksum bit is set */
4091         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
4092         /* TCP/UDP checksum error bit is set */
4093         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
4094                 /* let the stack verify checksum errors */
4095                 adapter->hw_csum_err++;
4096                 return;
4097         }
4098         /* TCP/UDP Checksum has not been calculated */
4099         if (hw->mac_type <= e1000_82547_rev_2) {
4100                 if (!(status & E1000_RXD_STAT_TCPCS))
4101                         return;
4102         } else {
4103                 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
4104                         return;
4105         }
4106         /* It must be a TCP or UDP packet with a valid checksum */
4107         if (likely(status & E1000_RXD_STAT_TCPCS)) {
4108                 /* TCP checksum is good */
4109                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4110         } else if (hw->mac_type > e1000_82547_rev_2) {
4111                 /* IP fragment with UDP payload */
4112                 /* Hardware complements the payload checksum, so we undo it
4113                  * and then put the value in host order for further stack use.
4114                  */
4115                 __sum16 sum = (__force __sum16)htons(csum);
4116                 skb->csum = csum_unfold(~sum);
4117                 skb->ip_summed = CHECKSUM_COMPLETE;
4118         }
4119         adapter->hw_csum_good++;
4120 }
4121
4122 /**
4123  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4124  * @adapter: board private structure
4125  **/
4126 #ifdef CONFIG_E1000_NAPI
4127 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4128                                struct e1000_rx_ring *rx_ring,
4129                                int *work_done, int work_to_do)
4130 #else
4131 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4132                                struct e1000_rx_ring *rx_ring)
4133 #endif
4134 {
4135         struct e1000_hw *hw = &adapter->hw;
4136         struct net_device *netdev = adapter->netdev;
4137         struct pci_dev *pdev = adapter->pdev;
4138         struct e1000_rx_desc *rx_desc, *next_rxd;
4139         struct e1000_buffer *buffer_info, *next_buffer;
4140         unsigned long flags;
4141         u32 length;
4142         u8 last_byte;
4143         unsigned int i;
4144         int cleaned_count = 0;
4145         bool cleaned = false;
4146         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4147
4148         i = rx_ring->next_to_clean;
4149         rx_desc = E1000_RX_DESC(*rx_ring, i);
4150         buffer_info = &rx_ring->buffer_info[i];
4151
4152         while (rx_desc->status & E1000_RXD_STAT_DD) {
4153                 struct sk_buff *skb;
4154                 u8 status;
4155
4156 #ifdef CONFIG_E1000_NAPI
4157                 if (*work_done >= work_to_do)
4158                         break;
4159                 (*work_done)++;
4160 #endif
4161                 status = rx_desc->status;
4162                 skb = buffer_info->skb;
4163                 buffer_info->skb = NULL;
4164
4165                 prefetch(skb->data - NET_IP_ALIGN);
4166
4167                 if (++i == rx_ring->count) i = 0;
4168                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4169                 prefetch(next_rxd);
4170
4171                 next_buffer = &rx_ring->buffer_info[i];
4172
4173                 cleaned = true;
4174                 cleaned_count++;
4175                 pci_unmap_single(pdev,
4176                                  buffer_info->dma,
4177                                  buffer_info->length,
4178                                  PCI_DMA_FROMDEVICE);
4179
4180                 length = le16_to_cpu(rx_desc->length);
4181
4182                 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
4183                         /* All receives must fit into a single buffer */
4184                         E1000_DBG("%s: Receive packet consumed multiple"
4185                                   " buffers\n", netdev->name);
4186                         /* recycle */
4187                         buffer_info->skb = skb;
4188                         goto next_desc;
4189                 }
4190
4191                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4192                         last_byte = *(skb->data + length - 1);
4193                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4194                                        last_byte)) {
4195                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4196                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4197                                                        length, skb->data);
4198                                 spin_unlock_irqrestore(&adapter->stats_lock,
4199                                                        flags);
4200                                 length--;
4201                         } else {
4202                                 /* recycle */
4203                                 buffer_info->skb = skb;
4204                                 goto next_desc;
4205                         }
4206                 }
4207
4208                 /* adjust length to remove Ethernet CRC, this must be
4209                  * done after the TBI_ACCEPT workaround above */
4210                 length -= 4;
4211
4212                 /* probably a little skewed due to removing CRC */
4213                 total_rx_bytes += length;
4214                 total_rx_packets++;
4215
4216                 /* code added for copybreak, this should improve
4217                  * performance for small packets with large amounts
4218                  * of reassembly being done in the stack */
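                /* the original full-size skb is saved in buffer_info
                 * below so it can be recycled back to the hardware */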
4219                 if (length < copybreak) {
4220                         struct sk_buff *new_skb =
4221                             netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
4222                         if (new_skb) {
4223                                 skb_reserve(new_skb, NET_IP_ALIGN);
4224                                 skb_copy_to_linear_data_offset(new_skb,
4225                                                                -NET_IP_ALIGN,
4226                                                                (skb->data -
4227                                                                 NET_IP_ALIGN),
4228                                                                (length +
4229                                                                 NET_IP_ALIGN));
4230                                 /* save the skb in buffer_info as good */
4231                                 buffer_info->skb = skb;
4232                                 skb = new_skb;
4233                         }
4234                         /* else just continue with the old one */
4235                 }
4236                 /* end copybreak code */
4237                 skb_put(skb, length);
4238
4239                 /* Receive Checksum Offload */
4240                 e1000_rx_checksum(adapter,
4241                                   (u32)(status) |
4242                                   ((u32)(rx_desc->errors) << 24),
4243                                   le16_to_cpu(rx_desc->csum), skb);
4244
4245                 skb->protocol = eth_type_trans(skb, netdev);
4246 #ifdef CONFIG_E1000_NAPI
4247                 if (unlikely(adapter->vlgrp &&
4248                             (status & E1000_RXD_STAT_VP))) {
4249                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4250                                                  le16_to_cpu(rx_desc->special));
4251                 } else {
4252                         netif_receive_skb(skb);
4253                 }
4254 #else /* CONFIG_E1000_NAPI */
4255                 if (unlikely(adapter->vlgrp &&
4256                             (status & E1000_RXD_STAT_VP))) {
4257                         vlan_hwaccel_rx(skb, adapter->vlgrp,
4258                                         le16_to_cpu(rx_desc->special));
4259                 } else {
4260                         netif_rx(skb);
4261                 }
4262 #endif /* CONFIG_E1000_NAPI */
4263                 netdev->last_rx = jiffies;
4264
4265 next_desc:
4266                 rx_desc->status = 0;
4267
4268                 /* return some buffers to hardware, one at a time is too slow */
4269                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4270                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4271                         cleaned_count = 0;
4272                 }
4273
4274                 /* use prefetched values */
4275                 rx_desc = next_rxd;
4276                 buffer_info = next_buffer;
4277         }
4278         rx_ring->next_to_clean = i;
4279
4280         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4281         if (cleaned_count)
4282                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4283
4284         adapter->total_rx_packets += total_rx_packets;
4285         adapter->total_rx_bytes += total_rx_bytes;
4286         adapter->net_stats.rx_bytes += total_rx_bytes;
4287         adapter->net_stats.rx_packets += total_rx_packets;
4288         return cleaned;
4289 }
4290
4291 /**
4292  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
4293  * @adapter: board private structure
4294  **/
4295
4296 #ifdef CONFIG_E1000_NAPI
4297 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4298                                   struct e1000_rx_ring *rx_ring,
4299                                   int *work_done, int work_to_do)
4300 #else
4301 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4302                                   struct e1000_rx_ring *rx_ring)
4303 #endif
4304 {
4305         union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
4306         struct net_device *netdev = adapter->netdev;
4307         struct pci_dev *pdev = adapter->pdev;
4308         struct e1000_buffer *buffer_info, *next_buffer;
4309         struct e1000_ps_page *ps_page;
4310         struct e1000_ps_page_dma *ps_page_dma;
4311         struct sk_buff *skb;
4312         unsigned int i, j;
4313         u32 length, staterr;
4314         int cleaned_count = 0;
4315         bool cleaned = false;
4316         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4317
4318         i = rx_ring->next_to_clean;
4319         rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4320         staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4321         buffer_info = &rx_ring->buffer_info[i];
4322
4323         while (staterr & E1000_RXD_STAT_DD) {
4324                 ps_page = &rx_ring->ps_page[i];
4325                 ps_page_dma = &rx_ring->ps_page_dma[i];
4326 #ifdef CONFIG_E1000_NAPI
4327                 if (unlikely(*work_done >= work_to_do))
4328                         break;
4329                 (*work_done)++;
4330 #endif
4331                 skb = buffer_info->skb;
4332
4333                 /* in the packet split case this is header only */
4334                 prefetch(skb->data - NET_IP_ALIGN);
4335
4336                 if (++i == rx_ring->count) i = 0;
4337                 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
4338                 prefetch(next_rxd);
4339
4340                 next_buffer = &rx_ring->buffer_info[i];
4341
4342                 cleaned = true;
4343                 cleaned_count++;
4344                 pci_unmap_single(pdev, buffer_info->dma,
4345                                  buffer_info->length,
4346                                  PCI_DMA_FROMDEVICE);
4347
4348                 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
4349                         E1000_DBG("%s: Packet Split buffers didn't pick up"
4350                                   " the full packet\n", netdev->name);
4351                         dev_kfree_skb_irq(skb);
4352                         goto next_desc;
4353                 }
4354
4355                 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
4356                         dev_kfree_skb_irq(skb);
4357                         goto next_desc;
4358                 }
4359
4360                 length = le16_to_cpu(rx_desc->wb.middle.length0);
4361
4362                 if (unlikely(!length)) {
4363                         E1000_DBG("%s: Last part of the packet spanning"
4364                                   " multiple descriptors\n", netdev->name);
4365                         dev_kfree_skb_irq(skb);
4366                         goto next_desc;
4367                 }
4368
4369                 /* Good Receive */
4370                 skb_put(skb, length);
4371
4372                 {
4373                 /* this looks ugly, but it seems compiler issues make it
4374                    more efficient than reusing j */
4375                 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
4376
4377                 /* page alloc/put takes too long and affects small packet
4378                  * throughput, so unsplit small packets and save the alloc/put */
4379                 if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
4380                         u8 *vaddr;
4381                         /* there is no documentation about how to call
4382                          * kmap_atomic, so we can't hold the mapping
4383                          * very long */
4384                         pci_dma_sync_single_for_cpu(pdev,
4385                                 ps_page_dma->ps_page_dma[0],
4386                                 PAGE_SIZE,
4387                                 PCI_DMA_FROMDEVICE);
4388                         vaddr = kmap_atomic(ps_page->ps_page[0],
4389                                             KM_SKB_DATA_SOFTIRQ);
4390                         memcpy(skb_tail_pointer(skb), vaddr, l1);
4391                         kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
4392                         pci_dma_sync_single_for_device(pdev,
4393                                 ps_page_dma->ps_page_dma[0],
4394                                 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4395                         /* remove the CRC */
4396                         l1 -= 4;
4397                         skb_put(skb, l1);
4398                         goto copydone;
4399                 } /* if */
4400                 }
4401
4402                 for (j = 0; j < adapter->rx_ps_pages; j++) {
4403                         if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
4404                                 break;
4405                         pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
4406                                         PAGE_SIZE, PCI_DMA_FROMDEVICE);
4407                         ps_page_dma->ps_page_dma[j] = 0;
4408                         skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
4409                                            length);
4410                         ps_page->ps_page[j] = NULL;
4411                         skb->len += length;
4412                         skb->data_len += length;
4413                         skb->truesize += length;
4414                 }
4415
4416                 /* strip the ethernet crc, problem is we're using pages now so
4417                  * this whole operation can get a little cpu intensive */
4418                 pskb_trim(skb, skb->len - 4);
4419
4420 copydone:
4421                 total_rx_bytes += skb->len;
4422                 total_rx_packets++;
4423
4424                 e1000_rx_checksum(adapter, staterr,
4425                                   le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
4426                 skb->protocol = eth_type_trans(skb, netdev);
4427
4428                 if (likely(rx_desc->wb.upper.header_status &
4429                            cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
4430                         adapter->rx_hdr_split++;
4431 #ifdef CONFIG_E1000_NAPI
4432                 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4433                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4434                                 le16_to_cpu(rx_desc->wb.middle.vlan));
4435                 } else {
4436                         netif_receive_skb(skb);
4437                 }
4438 #else /* CONFIG_E1000_NAPI */
4439                 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4440                         vlan_hwaccel_rx(skb, adapter->vlgrp,
4441                                 le16_to_cpu(rx_desc->wb.middle.vlan));
4442                 } else {
4443                         netif_rx(skb);
4444                 }
4445 #endif /* CONFIG_E1000_NAPI */
4446                 netdev->last_rx = jiffies;
4447
4448 next_desc:
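                /* clear the low status byte (including DD) so this
                 * descriptor is not picked up again before reuse */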
4449                 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
4450                 buffer_info->skb = NULL;
4451
4452                 /* return some buffers to hardware, one at a time is too slow */
4453                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4454                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4455                         cleaned_count = 0;
4456                 }
4457
4458                 /* use prefetched values */
4459                 rx_desc = next_rxd;
4460                 buffer_info = next_buffer;
4461
4462                 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4463         }
4464         rx_ring->next_to_clean = i;
4465
4466         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4467         if (cleaned_count)
4468                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4469
4470         adapter->total_rx_packets += total_rx_packets;
4471         adapter->total_rx_bytes += total_rx_bytes;
4472         adapter->net_stats.rx_bytes += total_rx_bytes;
4473         adapter->net_stats.rx_packets += total_rx_packets;
4474         return cleaned;
4475 }
4476
4477 /**
4478  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4479  * @adapter: address of board private structure
4480  **/
4481
4482 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4483                                    struct e1000_rx_ring *rx_ring,
4484                                    int cleaned_count)
4485 {
4486         struct e1000_hw *hw = &adapter->hw;
4487         struct net_device *netdev = adapter->netdev;
4488         struct pci_dev *pdev = adapter->pdev;
4489         struct e1000_rx_desc *rx_desc;
4490         struct e1000_buffer *buffer_info;
4491         struct sk_buff *skb;
4492         unsigned int i;
4493         unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
4494
4495         i = rx_ring->next_to_use;
4496         buffer_info = &rx_ring->buffer_info[i];
4497
4498         while (cleaned_count--) {
4499                 skb = buffer_info->skb;
4500                 if (skb) {
4501                         skb_trim(skb, 0);
4502                         goto map_skb;
4503                 }
4504
4505                 skb = netdev_alloc_skb(netdev, bufsz);
4506                 if (unlikely(!skb)) {
4507                         /* Better luck next round */
4508                         adapter->alloc_rx_buff_failed++;
4509                         break;
4510                 }
4511
4512                 /* Fix for errata 23, can't cross 64kB boundary */
4513                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4514                         struct sk_buff *oldskb = skb;
4515                         DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4516                                              "at %p\n", bufsz, skb->data);
4517                         /* Try again, without freeing the previous */
4518                         skb = netdev_alloc_skb(netdev, bufsz);
4519                         /* Failed allocation, critical failure */
4520                         if (!skb) {
4521                                 dev_kfree_skb(oldskb);
4522                                 break;
4523                         }
4524
4525                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4526                                 /* give up */
4527                                 dev_kfree_skb(skb);
4528                                 dev_kfree_skb(oldskb);
4529                                 break; /* while !buffer_info->skb */
4530                         }
4531
4532                         /* Use new allocation */
4533                         dev_kfree_skb(oldskb);
4534                 }
4535                 /* Make buffer alignment 2 beyond a 16 byte boundary
4536                  * this will result in a 16 byte aligned IP header after
4537                  * the 14 byte MAC header is removed
4538                  */
4539                 skb_reserve(skb, NET_IP_ALIGN);
4540
4541                 buffer_info->skb = skb;
4542                 buffer_info->length = adapter->rx_buffer_len;
4543 map_skb:
4544                 buffer_info->dma = pci_map_single(pdev,
4545                                                   skb->data,
4546                                                   adapter->rx_buffer_len,
4547                                                   PCI_DMA_FROMDEVICE);
4548
4549                 /* Fix for errata 23, can't cross 64kB boundary */
4550                 if (!e1000_check_64k_bound(adapter,
4551                                         (void *)(unsigned long)buffer_info->dma,
4552                                         adapter->rx_buffer_len)) {
4553                         DPRINTK(RX_ERR, ERR,
4554                                 "dma align check failed: %u bytes at %p\n",
4555                                 adapter->rx_buffer_len,
4556                                 (void *)(unsigned long)buffer_info->dma);
4557                         dev_kfree_skb(skb);
4558                         buffer_info->skb = NULL;
4559
4560                         pci_unmap_single(pdev, buffer_info->dma,
4561                                          adapter->rx_buffer_len,
4562                                          PCI_DMA_FROMDEVICE);
4563
4564                         break; /* while !buffer_info->skb */
4565                 }
4566                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4567                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4568
4569                 if (unlikely(++i == rx_ring->count))
4570                         i = 0;
4571                 buffer_info = &rx_ring->buffer_info[i];
4572         }
4573
4574         if (likely(rx_ring->next_to_use != i)) {
4575                 rx_ring->next_to_use = i;
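                /* step i back one slot (with wraparound) so the tail
                 * register is written with the last descriptor actually
                 * prepared rather than the next slot to be filled */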
4576                 if (unlikely(i-- == 0))
4577                         i = (rx_ring->count - 1);
4578
4579                 /* Force memory writes to complete before letting h/w
4580                  * know there are new descriptors to fetch.  (Only
4581                  * applicable for weak-ordered memory model archs,
4582                  * such as IA-64). */
4583                 wmb();
4584                 writel(i, hw->hw_addr + rx_ring->rdt);
4585         }
4586 }
4587
4588 /**
4589  * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
4590  * @adapter: address of board private structure
4591  **/
4592
4593 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4594                                       struct e1000_rx_ring *rx_ring,
4595                                       int cleaned_count)
4596 {
4597         struct e1000_hw *hw = &adapter->hw;
4598         struct net_device *netdev = adapter->netdev;
4599         struct pci_dev *pdev = adapter->pdev;
4600         union e1000_rx_desc_packet_split *rx_desc;
4601         struct e1000_buffer *buffer_info;
4602         struct e1000_ps_page *ps_page;
4603         struct e1000_ps_page_dma *ps_page_dma;
4604         struct sk_buff *skb;
4605         unsigned int i, j;
4606
4607         i = rx_ring->next_to_use;
4608         buffer_info = &rx_ring->buffer_info[i];
4609         ps_page = &rx_ring->ps_page[i];
4610         ps_page_dma = &rx_ring->ps_page_dma[i];
4611
4612         while (cleaned_count--) {
4613                 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4614
4615                 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
4616                         if (j < adapter->rx_ps_pages) {
4617                                 if (likely(!ps_page->ps_page[j])) {
4618                                         ps_page->ps_page[j] =
4619                                                 alloc_page(GFP_ATOMIC);
4620                                         if (unlikely(!ps_page->ps_page[j])) {
4621                                                 adapter->alloc_rx_buff_failed++;
4622                                                 goto no_buffers;
4623                                         }
4624                                         ps_page_dma->ps_page_dma[j] =
4625                                                 pci_map_page(pdev,
4626                                                             ps_page->ps_page[j],
4627                                                             0, PAGE_SIZE,
4628                                                             PCI_DMA_FROMDEVICE);
4629                                 }
4630                                 /* Refresh the desc even if buffer_addrs didn't
4631                                  * change because each write-back erases
4632                                  * this info.
4633                                  */
4634                                 rx_desc->read.buffer_addr[j+1] =
4635                                      cpu_to_le64(ps_page_dma->ps_page_dma[j]);
4636                         } else
4637                                 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
4638                 }
4639
4640                 skb = netdev_alloc_skb(netdev,
4641                                        adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4642
4643                 if (unlikely(!skb)) {
4644                         adapter->alloc_rx_buff_failed++;
4645                         break;
4646                 }
4647
4648                 /* Make buffer alignment 2 beyond a 16 byte boundary
4649                  * this will result in a 16 byte aligned IP header after
4650                  * the 14 byte MAC header is removed
4651                  */
4652                 skb_reserve(skb, NET_IP_ALIGN);
4653
4654                 buffer_info->skb = skb;
4655                 buffer_info->length = adapter->rx_ps_bsize0;
4656                 buffer_info->dma = pci_map_single(pdev, skb->data,
4657                                                   adapter->rx_ps_bsize0,
4658                                                   PCI_DMA_FROMDEVICE);
4659
4660                 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4661
4662                 if (unlikely(++i == rx_ring->count)) i = 0;
4663                 buffer_info = &rx_ring->buffer_info[i];
4664                 ps_page = &rx_ring->ps_page[i];
4665                 ps_page_dma = &rx_ring->ps_page_dma[i];
4666         }
4667
4668 no_buffers:
4669         if (likely(rx_ring->next_to_use != i)) {
4670                 rx_ring->next_to_use = i;
4671                 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4672
4673                 /* Force memory writes to complete before letting h/w
4674                  * know there are new descriptors to fetch.  (Only
4675                  * applicable for weak-ordered memory model archs,
4676                  * such as IA-64). */
4677                 wmb();
4678                 /* Hardware increments by 16 bytes, but packet split
4679                  * descriptors are 32 bytes...so we increment tail
4680                  * twice as much.
4681                  */
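                /* e.g. descriptor index 5 is written as tail value 10 */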
4682                 writel(i<<1, hw->hw_addr + rx_ring->rdt);
4683         }
4684 }
4685
4686 /**
4687  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4688  * @adapter: board private structure
4689  **/
4690
4691 static void e1000_smartspeed(struct e1000_adapter *adapter)
4692 {
4693         struct e1000_hw *hw = &adapter->hw;
4694         u16 phy_status;
4695         u16 phy_ctrl;
4696
4697         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4698            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4699                 return;
4700
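        /* First pass (counter == 0): on back-to-back Master/Slave
         * configuration faults, stop forcing master and restart
         * autonegotiation.  If the link is still down by the time the
         * counter reaches E1000_SMARTSPEED_DOWNSHIFT, force master
         * again in case only 2/3 pairs of the cable are usable. */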
4701         if (adapter->smartspeed == 0) {
4702                 /* If the Master/Slave config fault is asserted on two
4703                  * back-to-back reads, treat it as a real fault */
4704                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4705                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4706                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4707                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4708                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4709                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4710                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4711                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4712                                             phy_ctrl);
4713                         adapter->smartspeed++;
4714                         if (!e1000_phy_setup_autoneg(hw) &&
4715                            !e1000_read_phy_reg(hw, PHY_CTRL,
4716                                                &phy_ctrl)) {
4717                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4718                                              MII_CR_RESTART_AUTO_NEG);
4719                                 e1000_write_phy_reg(hw, PHY_CTRL,
4720                                                     phy_ctrl);
4721                         }
4722                 }
4723                 return;
4724         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4725                 /* If still no link, perhaps using 2/3 pair cable */
4726                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4727                 phy_ctrl |= CR_1000T_MS_ENABLE;
4728                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4729                 if (!e1000_phy_setup_autoneg(hw) &&
4730                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4731                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4732                                      MII_CR_RESTART_AUTO_NEG);
4733                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4734                 }
4735         }
4736         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4737         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4738                 adapter->smartspeed = 0;
4739 }
4740
4741 /**
4742  * e1000_ioctl - handle an ioctl request
4743  * @netdev: network interface device structure
4744  * @ifr: interface request structure
4745  * @cmd: ioctl command to execute
4746  **/
4747
4748 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4749 {
4750         switch (cmd) {
4751         case SIOCGMIIPHY:
4752         case SIOCGMIIREG:
4753         case SIOCSMIIREG:
4754                 return e1000_mii_ioctl(netdev, ifr, cmd);
4755         default:
4756                 return -EOPNOTSUPP;
4757         }
4758 }
4759
4760 /**
4761  * e1000_mii_ioctl - handle MII ioctl requests
4762  * @netdev: network interface device structure
4763  * @ifr: interface request structure
4764  * @cmd: ioctl command to execute
4765  **/
4766
4767 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4768                            int cmd)
4769 {
4770         struct e1000_adapter *adapter = netdev_priv(netdev);
4771         struct e1000_hw *hw = &adapter->hw;
4772         struct mii_ioctl_data *data = if_mii(ifr);
4773         int retval;
4774         u16 mii_reg;
4775         u16 spddplx;
4776         unsigned long flags;
4777
4778         if (hw->media_type != e1000_media_type_copper)
4779                 return -EOPNOTSUPP;
4780
4781         switch (cmd) {
4782         case SIOCGMIIPHY:
4783                 data->phy_id = hw->phy_addr;
4784                 break;
4785         case SIOCGMIIREG:
4786                 if (!capable(CAP_NET_ADMIN))
4787                         return -EPERM;
4788                 spin_lock_irqsave(&adapter->stats_lock, flags);
4789                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4790                                    &data->val_out)) {
4791                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4792                         return -EIO;
4793                 }
4794                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4795                 break;
4796         case SIOCSMIIREG:
4797                 if (!capable(CAP_NET_ADMIN))
4798                         return -EPERM;
4799                 if (data->reg_num & ~(0x1F))
4800                         return -EFAULT;
4801                 mii_reg = data->val_in;
4802                 spin_lock_irqsave(&adapter->stats_lock, flags);
4803                 if (e1000_write_phy_reg(hw, data->reg_num,
4804                                         mii_reg)) {
4805                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4806                         return -EIO;
4807                 }
4808                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4809                 if (hw->media_type == e1000_media_type_copper) {
4810                         switch (data->reg_num) {
4811                         case PHY_CTRL:
4812                                 if (mii_reg & MII_CR_POWER_DOWN)
4813                                         break;
4814                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4815                                         hw->autoneg = 1;
4816                                         hw->autoneg_advertised = 0x2F;
4817                                 } else {
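                                        /* forced mode: raw BMCR bits,
                                         * 0x40 = speed-1000 MSB,
                                         * 0x2000 = speed-100,
                                         * 0x100 = full duplex */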
4818                                         if (mii_reg & 0x40)
4819                                                 spddplx = SPEED_1000;
4820                                         else if (mii_reg & 0x2000)
4821                                                 spddplx = SPEED_100;
4822                                         else
4823                                                 spddplx = SPEED_10;
4824                                         spddplx += (mii_reg & 0x100)
4825                                                    ? DUPLEX_FULL :
4826                                                    DUPLEX_HALF;
4827                                         retval = e1000_set_spd_dplx(adapter,
4828                                                                     spddplx);
4829                                         if (retval)
4830                                                 return retval;
4831                                 }
4832                                 if (netif_running(adapter->netdev))
4833                                         e1000_reinit_locked(adapter);
4834                                 else
4835                                         e1000_reset(adapter);
4836                                 break;
4837                         case M88E1000_PHY_SPEC_CTRL:
4838                         case M88E1000_EXT_PHY_SPEC_CTRL:
4839                                 if (e1000_phy_reset(hw))
4840                                         return -EIO;
4841                                 break;
4842                         }
4843                 } else {
4844                         switch (data->reg_num) {
4845                         case PHY_CTRL:
4846                                 if (mii_reg & MII_CR_POWER_DOWN)
4847                                         break;
4848                                 if (netif_running(adapter->netdev))
4849                                         e1000_reinit_locked(adapter);
4850                                 else
4851                                         e1000_reset(adapter);
4852                                 break;
4853                         }
4854                 }
4855                 break;
4856         default:
4857                 return -EOPNOTSUPP;
4858         }
4859         return E1000_SUCCESS;
4860 }
4861
4862 void e1000_pci_set_mwi(struct e1000_hw *hw)
4863 {
4864         struct e1000_adapter *adapter = hw->back;
4865         int ret_val = pci_set_mwi(adapter->pdev);
4866
4867         if (ret_val)
4868                 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4869 }
4870
4871 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4872 {
4873         struct e1000_adapter *adapter = hw->back;
4874
4875         pci_clear_mwi(adapter->pdev);
4876 }
4877
4878 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4879 {
4880         struct e1000_adapter *adapter = hw->back;
4881         return pcix_get_mmrbc(adapter->pdev);
4882 }
4883
4884 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4885 {
4886         struct e1000_adapter *adapter = hw->back;
4887         pcix_set_mmrbc(adapter->pdev, mmrbc);
4888 }
4889
4890 s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
4891 {
4892         struct e1000_adapter *adapter = hw->back;
4893         u16 cap_offset;
4894
4895         cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4896         if (!cap_offset)
4897                 return -E1000_ERR_CONFIG;
4898
4899         pci_read_config_word(adapter->pdev, cap_offset + reg, value);
4900
4901         return E1000_SUCCESS;
4902 }
4903
4904 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4905 {
4906         outl(value, port);
4907 }
4908
4909 static void e1000_vlan_rx_register(struct net_device *netdev,
4910                                    struct vlan_group *grp)
4911 {
4912         struct e1000_adapter *adapter = netdev_priv(netdev);
4913         struct e1000_hw *hw = &adapter->hw;
4914         u32 ctrl, rctl;
4915
4916         if (!test_bit(__E1000_DOWN, &adapter->flags))
4917                 e1000_irq_disable(adapter);
4918         adapter->vlgrp = grp;
4919
4920         if (grp) {
4921                 /* enable VLAN tag insert/strip */
4922                 ctrl = er32(CTRL);
4923                 ctrl |= E1000_CTRL_VME;
4924                 ew32(CTRL, ctrl);
4925
4926                 if (adapter->hw.mac_type != e1000_ich8lan) {
4927                         /* enable VLAN receive filtering */
4928                         rctl = er32(RCTL);
4929                         rctl &= ~E1000_RCTL_CFIEN;
4930                         ew32(RCTL, rctl);
4931                         e1000_update_mng_vlan(adapter);
4932                 }
4933         } else {
4934                 /* disable VLAN tag insert/strip */
4935                 ctrl = er32(CTRL);
4936                 ctrl &= ~E1000_CTRL_VME;
4937                 ew32(CTRL, ctrl);
4938
4939                 if (adapter->hw.mac_type != e1000_ich8lan) {
4940                         if (adapter->mng_vlan_id !=
4941                             (u16)E1000_MNG_VLAN_NONE) {
4942                                 e1000_vlan_rx_kill_vid(netdev,
4943                                                        adapter->mng_vlan_id);
4944                                 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4945                         }
4946                 }
4947         }
4948
4949         if (!test_bit(__E1000_DOWN, &adapter->flags))
4950                 e1000_irq_enable(adapter);
4951 }
4952
4953 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4954 {
4955         struct e1000_adapter *adapter = netdev_priv(netdev);
4956         struct e1000_hw *hw = &adapter->hw;
4957         u32 vfta, index;
4958
4959         if ((hw->mng_cookie.status &
4960              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4961             (vid == adapter->mng_vlan_id))
4962                 return;
4963         /* add VID to filter table */
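        /* the 4096 VLAN IDs map onto 128 32-bit VFTA registers, e.g.
         * VID 100 sets bit 100 & 0x1F = 4 in register 100 >> 5 = 3 */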
4964         index = (vid >> 5) & 0x7F;
4965         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4966         vfta |= (1 << (vid & 0x1F));
4967         e1000_write_vfta(hw, index, vfta);
4968 }
4969
4970 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4971 {
4972         struct e1000_adapter *adapter = netdev_priv(netdev);
4973         struct e1000_hw *hw = &adapter->hw;
4974         u32 vfta, index;
4975
4976         if (!test_bit(__E1000_DOWN, &adapter->flags))
4977                 e1000_irq_disable(adapter);
4978         vlan_group_set_device(adapter->vlgrp, vid, NULL);
4979         if (!test_bit(__E1000_DOWN, &adapter->flags))
4980                 e1000_irq_enable(adapter);
4981
4982         if ((hw->mng_cookie.status &
4983              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4984             (vid == adapter->mng_vlan_id)) {
4985                 /* release control to f/w */
4986                 e1000_release_hw_control(adapter);
4987                 return;
4988         }
4989
4990         /* remove VID from filter table */
4991         index = (vid >> 5) & 0x7F;
4992         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4993         vfta &= ~(1 << (vid & 0x1F));
4994         e1000_write_vfta(hw, index, vfta);
4995 }
4996
4997 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4998 {
4999         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5000
5001         if (adapter->vlgrp) {
5002                 u16 vid;
5003                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5004                         if (!vlan_group_get_device(adapter->vlgrp, vid))
5005                                 continue;
5006                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
5007                 }
5008         }
5009 }
5010
5011 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
5012 {
5013         struct e1000_hw *hw = &adapter->hw;
5014
5015         hw->autoneg = 0;
5016
5017         /* Fiber NICs only allow 1000 Mbps full duplex */
5018         if ((hw->media_type == e1000_media_type_fiber) &&
5019                 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
5020                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
5021                 return -EINVAL;
5022         }
5023
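        /* spddplx is additive: DUPLEX_HALF is 0 and DUPLEX_FULL is 1,
         * so e.g. SPEED_100 + DUPLEX_FULL selects the case value 101 */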
5024         switch (spddplx) {
5025         case SPEED_10 + DUPLEX_HALF:
5026                 hw->forced_speed_duplex = e1000_10_half;
5027                 break;
5028         case SPEED_10 + DUPLEX_FULL:
5029                 hw->forced_speed_duplex = e1000_10_full;
5030                 break;
5031         case SPEED_100 + DUPLEX_HALF:
5032                 hw->forced_speed_duplex = e1000_100_half;
5033                 break;
5034         case SPEED_100 + DUPLEX_FULL:
5035                 hw->forced_speed_duplex = e1000_100_full;
5036                 break;
5037         case SPEED_1000 + DUPLEX_FULL:
5038                 hw->autoneg = 1;
5039                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5040                 break;
5041         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5042         default:
5043                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
5044                 return -EINVAL;
5045         }
5046         return 0;
5047 }
5048
5049 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5050 {
5051         struct net_device *netdev = pci_get_drvdata(pdev);
5052         struct e1000_adapter *adapter = netdev_priv(netdev);
5053         struct e1000_hw *hw = &adapter->hw;
5054         u32 ctrl, ctrl_ext, rctl, status;
5055         u32 wufc = adapter->wol;
5056 #ifdef CONFIG_PM
5057         int retval = 0;
5058 #endif
5059
5060         netif_device_detach(netdev);
5061
5062         if (netif_running(netdev)) {
5063                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5064                 e1000_down(adapter);
5065         }
5066
5067 #ifdef CONFIG_PM
5068         retval = pci_save_state(pdev);
5069         if (retval)
5070                 return retval;
5071 #endif
5072
5073         status = er32(STATUS);
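        /* waking on a link-change event is pointless while link is up */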
5074         if (status & E1000_STATUS_LU)
5075                 wufc &= ~E1000_WUFC_LNKC;
5076
5077         if (wufc) {
5078                 e1000_setup_rctl(adapter);
5079                 e1000_set_rx_mode(netdev);
5080
5081                 /* turn on all-multi mode if wake on multicast is enabled */
5082                 if (wufc & E1000_WUFC_MC) {
5083                         rctl = er32(RCTL);
5084                         rctl |= E1000_RCTL_MPE;
5085                         ew32(RCTL, rctl);
5086                 }
5087
5088                 if (hw->mac_type >= e1000_82540) {
5089                         ctrl = er32(CTRL);
5090                         /* advertise wake from D3Cold */
5091                         #define E1000_CTRL_ADVD3WUC 0x00100000
5092                         /* phy power management enable */
5093                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5094                         ctrl |= E1000_CTRL_ADVD3WUC |
5095                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5096                         ew32(CTRL, ctrl);
5097                 }
5098
5099                 if (hw->media_type == e1000_media_type_fiber ||
5100                    hw->media_type == e1000_media_type_internal_serdes) {
5101                         /* keep the laser running in D3 */
5102                         ctrl_ext = er32(CTRL_EXT);
5103                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5104                         ew32(CTRL_EXT, ctrl_ext);
5105                 }
5106
5107                 /* Allow time for pending master requests to run */
5108                 e1000_disable_pciex_master(hw);
5109
5110                 ew32(WUC, E1000_WUC_PME_EN);
5111                 ew32(WUFC, wufc);
5112                 pci_enable_wake(pdev, PCI_D3hot, 1);
5113                 pci_enable_wake(pdev, PCI_D3cold, 1);
5114         } else {
5115                 ew32(WUC, 0);
5116                 ew32(WUFC, 0);
5117                 pci_enable_wake(pdev, PCI_D3hot, 0);
5118                 pci_enable_wake(pdev, PCI_D3cold, 0);
5119         }
5120
5121         e1000_release_manageability(adapter);
5122
5123         /* make sure adapter isn't asleep if manageability is enabled */
5124         if (adapter->en_mng_pt) {
5125                 pci_enable_wake(pdev, PCI_D3hot, 1);
5126                 pci_enable_wake(pdev, PCI_D3cold, 1);
5127         }
5128
5129         if (hw->phy_type == e1000_phy_igp_3)
5130                 e1000_phy_powerdown_workaround(hw);
5131
5132         if (netif_running(netdev))
5133                 e1000_free_irq(adapter);
5134
5135         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
5136          * would have already happened in close and is redundant. */
5137         e1000_release_hw_control(adapter);
5138
5139         pci_disable_device(pdev);
5140
5141         pci_set_power_state(pdev, pci_choose_state(pdev, state));
5142
5143         return 0;
5144 }
5145
5146 #ifdef CONFIG_PM
5147 static int e1000_resume(struct pci_dev *pdev)
5148 {
5149         struct net_device *netdev = pci_get_drvdata(pdev);
5150         struct e1000_adapter *adapter = netdev_priv(netdev);
5151         struct e1000_hw *hw = &adapter->hw;
5152         int err;
5153
5154         pci_set_power_state(pdev, PCI_D0);
5155         pci_restore_state(pdev);
5156         if ((err = pci_enable_device(pdev))) {
5157                 printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
5158                 return err;
5159         }
5160         pci_set_master(pdev);
5161
5162         pci_enable_wake(pdev, PCI_D3hot, 0);
5163         pci_enable_wake(pdev, PCI_D3cold, 0);
5164
5165         if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
5166                 return err;
5167
5168         e1000_power_up_phy(adapter);
5169         e1000_reset(adapter);
5170         ew32(WUS, ~0);
5171
5172         e1000_init_manageability(adapter);
5173
5174         if (netif_running(netdev))
5175                 e1000_up(adapter);
5176
5177         netif_device_attach(netdev);
5178
5179         /* If the controller is 82573 and f/w is AMT, do not set
5180          * DRV_LOAD until the interface is up.  For all other cases,
5181          * let the f/w know that the h/w is now under the control
5182          * of the driver. */
5183         if (hw->mac_type != e1000_82573 ||
5184             !e1000_check_mng_mode(hw))
5185                 e1000_get_hw_control(adapter);
5186
5187         return 0;
5188 }
5189 #endif
5190
5191 static void e1000_shutdown(struct pci_dev *pdev)
5192 {
5193         e1000_suspend(pdev, PMSG_SUSPEND);
5194 }
5195
5196 #ifdef CONFIG_NET_POLL_CONTROLLER
5197 /*
5198  * Polling 'interrupt' - used by things like netconsole to send skbs
5199  * without having to re-enable interrupts. It's not called while
5200  * the interrupt routine is executing.
5201  */
5202 static void e1000_netpoll(struct net_device *netdev)
5203 {
5204         struct e1000_adapter *adapter = netdev_priv(netdev);
5205
5206         disable_irq(adapter->pdev->irq);
5207         e1000_intr(adapter->pdev->irq, netdev);
5208 #ifndef CONFIG_E1000_NAPI
5209         adapter->clean_rx(adapter, adapter->rx_ring);
5210 #endif
5211         enable_irq(adapter->pdev->irq);
5212 }
5213 #endif
5214
5215 /**
5216  * e1000_io_error_detected - called when PCI error is detected
5217  * @pdev: Pointer to PCI device
5218  * @state: The current PCI connection state
5219  *
5220  * This function is called after a PCI bus error affecting
5221  * this device has been detected.
5222  */
5223 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5224                                                 pci_channel_state_t state)
5225 {
5226         struct net_device *netdev = pci_get_drvdata(pdev);
5227         struct e1000_adapter *adapter = netdev_priv(netdev);
5228
5229         netif_device_detach(netdev);
5230
5231         if (netif_running(netdev))
5232                 e1000_down(adapter);
5233         pci_disable_device(pdev);
5234
5235         /* Request a slot reset. */
5236         return PCI_ERS_RESULT_NEED_RESET;
5237 }
5238
5239 /**
5240  * e1000_io_slot_reset - called after the pci bus has been reset.
5241  * @pdev: Pointer to PCI device
5242  *
5243  * Restart the card from scratch, as if from a cold-boot. Implementation
5244  * resembles the first-half of the e1000_resume routine.
5245  */
5246 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5247 {
5248         struct net_device *netdev = pci_get_drvdata(pdev);
5249         struct e1000_adapter *adapter = netdev_priv(netdev);
5250         struct e1000_hw *hw = &adapter->hw;
5251
5252         if (pci_enable_device(pdev)) {
5253                 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
5254                 return PCI_ERS_RESULT_DISCONNECT;
5255         }
5256         pci_set_master(pdev);
5257
5258         pci_enable_wake(pdev, PCI_D3hot, 0);
5259         pci_enable_wake(pdev, PCI_D3cold, 0);
5260
5261         e1000_reset(adapter);
5262         ew32(WUS, ~0);
5263
5264         return PCI_ERS_RESULT_RECOVERED;
5265 }
5266
5267 /**
5268  * e1000_io_resume - called when traffic can start flowing again.
5269  * @pdev: Pointer to PCI device
5270  *
5271  * This callback is called when the error recovery driver tells us that
5272  * its OK to resume normal operation. Implementation resembles the
5273  * second-half of the e1000_resume routine.
5274  */
5275 static void e1000_io_resume(struct pci_dev *pdev)
5276 {
5277         struct net_device *netdev = pci_get_drvdata(pdev);
5278         struct e1000_adapter *adapter = netdev_priv(netdev);
5279         struct e1000_hw *hw = &adapter->hw;
5280
5281         e1000_init_manageability(adapter);
5282
5283         if (netif_running(netdev)) {
5284                 if (e1000_up(adapter)) {
5285                         printk(KERN_ERR "e1000: can't bring device back up after reset\n");
5286                         return;
5287                 }
5288         }
5289
5290         netif_device_attach(netdev);
5291
5292         /* If the controller is 82573 and f/w is AMT, do not set
5293          * DRV_LOAD until the interface is up.  For all other cases,
5294          * let the f/w know that the h/w is now under the control
5295          * of the driver. */
5296         if (hw->mac_type != e1000_82573 ||
5297             !e1000_check_mng_mode(hw))
5298                 e1000_get_hw_control(adapter);
5299
5300 }
5301
5302 /* e1000_main.c */