www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
author Linus Torvalds <torvalds@g5.osdl.org>
Wed, 18 Jan 2006 03:47:31 +0000 (19:47 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
Wed, 18 Jan 2006 03:47:31 +0000 (19:47 -0800)
83 files changed:
MAINTAINERS
arch/sparc/mm/iommu.c
arch/sparc64/kernel/time.c
drivers/net/b44.c
drivers/net/cassini.c
drivers/net/e100.c
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_ethtool.c
drivers/net/e1000/e1000_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000/e1000_param.c
drivers/net/mv643xx_eth.c
drivers/net/skge.c
drivers/net/sky2.c
drivers/net/spider_net.c
drivers/net/spider_net.h
drivers/net/spider_net_ethtool.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/wireless/airo.c
drivers/net/wireless/atmel.c
drivers/net/wireless/hostap/Kconfig
drivers/net/wireless/hostap/Makefile
drivers/net/wireless/hostap/hostap.h
drivers/net/wireless/hostap/hostap_80211.h
drivers/net/wireless/hostap/hostap_80211_rx.c
drivers/net/wireless/hostap/hostap_80211_tx.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_ap.h
drivers/net/wireless/hostap/hostap_common.h
drivers/net/wireless/hostap/hostap_config.h
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/hostap/hostap_proc.c
drivers/net/wireless/hostap/hostap_wlan.h
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2200.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/wavelan_cs.c
drivers/video/sbuslib.c
drivers/video/sbuslib.h
include/asm-powerpc/lppaca.h
include/linux/kernel.h
include/linux/netfilter_ipv6/ip6t_ah.h
include/linux/netfilter_ipv6/ip6t_esp.h
include/linux/netfilter_ipv6/ip6t_frag.h
include/linux/netfilter_ipv6/ip6t_opts.h
include/linux/netfilter_ipv6/ip6t_rt.h
include/linux/skbuff.h
include/net/ieee80211_crypt.h
include/net/iw_handler.h
net/bridge/netfilter/ebt_ip.c
net/bridge/netfilter/ebt_log.c
net/core/filter.c
net/core/netpoll.c
net/core/pktgen.c
net/dccp/ackvec.c
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ip_conntrack_proto_gre.c
net/ipv4/netfilter/ipt_policy.c
net/ipv4/route.c
net/ipv6/addrconf.c
net/ipv6/anycast.c
net/ipv6/ip6_flowlabel.c
net/ipv6/mcast.c
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_dst.c
net/ipv6/netfilter/ip6t_eui64.c
net/ipv6/netfilter/ip6t_frag.c
net/ipv6/netfilter/ip6t_hbh.c
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/ip6t_owner.c
net/ipv6/netfilter/ip6t_policy.c
net/ipv6/netfilter/ip6t_rt.c
net/rxrpc/krxtimod.c
net/rxrpc/proc.c
net/sched/sch_prio.c
net/sched/sch_sfq.c
sound/sparc/cs4231.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 6d1b048c62a105c12518f5ae1b67ec4a517d29bc..ff16eac8cf5bbaa0b192c56bf11940f30f4b1e0f 100644
@@ -1696,11 +1696,13 @@ M: mtk-manpages@gmx.net
 W: ftp://ftp.kernel.org/pub/linux/docs/manpages
 S: Maintained
 
-MARVELL MV64340 ETHERNET DRIVER
+MARVELL MV643XX ETHERNET DRIVER
+P:     Dale Farnsworth
+M:     dale@farnsworth.org
 P:     Manish Lachwani
-L:     linux-mips@linux-mips.org
+M:     mlachwani@mvista.com
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Odd Fixes for 2.4; Maintained for 2.6.
 
 MATROX FRAMEBUFFER DRIVER
 P:     Petr Vandrovec
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 489bf68d5f05d49786b4f52de6356a951c9be4ad..77840c80478665058b778939b3219861d9744e5e 100644
@@ -295,8 +295,7 @@ static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
        int ioptex;
        int i;
 
-       if (busa < iommu->start)
-               BUG();
+       BUG_ON(busa < iommu->start);
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
@@ -340,9 +339,9 @@ static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
        iopte_t *first;
        int ioptex;
 
-       if ((va & ~PAGE_MASK) != 0) BUG();
-       if ((addr & ~PAGE_MASK) != 0) BUG();
-       if ((len & ~PAGE_MASK) != 0) BUG();
+       BUG_ON((va & ~PAGE_MASK) != 0);
+       BUG_ON((addr & ~PAGE_MASK) != 0);
+       BUG_ON((len & ~PAGE_MASK) != 0);
 
        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
@@ -405,8 +404,8 @@ static void iommu_unmap_dma_area(unsigned long busa, int len)
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 
-       if ((busa & ~PAGE_MASK) != 0) BUG();
-       if ((len & ~PAGE_MASK) != 0) BUG();
+       BUG_ON((busa & ~PAGE_MASK) != 0);
+       BUG_ON((len & ~PAGE_MASK) != 0);
 
        iopte += ioptex;
        end = busa + len;
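
The sparc iommu hunks above are a mechanical cleanup: open-coded "if (cond) BUG();" checks become BUG_ON(cond). A minimal sketch of the idiom (the helper below is illustrative, not code from this commit):

#include <linux/kernel.h>   /* BUG_ON() */
#include <asm/page.h>       /* PAGE_MASK */

/* Hypothetical helper showing the conversion: BUG_ON(cond) behaves like
 * "if (cond) BUG();" but keeps the whole assertion on one line. */
static void assert_page_aligned(unsigned long addr, unsigned long len)
{
        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);
}
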
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 459c8fbe02b4df734af56e0d16cde8d0b7859ea9..a22930d62adf0de96b7d365a8de9df0e141993b3 100644
@@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
  * Since STICK is constantly updating, we have to access it carefully.
  *
  * The sequence we use to read is:
- * 1) read low
- * 2) read high
- * 3) read low again, if it rolled over increment high by 1
+ * 1) read high
+ * 2) read low
+ * 3) read high again, if it rolled re-read both low and high again.
  *
  * Writing STICK safely is also tricky:
  * 1) write low to zero
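
The comment hunk above rewrites the documented read order for the Hummingbird STICK register: read high, then low, then high again, and retry if the high half changed. A minimal C sketch of that sequence, assuming a hypothetical counter whose halves live in two separate registers (the real driver does this in the inline assembly changed in the next hunk):

#include <linux/types.h>
#include <asm/io.h>

/* Read high, then low, then high again; if the high half changed while
 * we were reading, the counter rolled over and we retry. */
static u64 read_split_counter(void __iomem *lo_reg, void __iomem *hi_reg)
{
        u32 hi, lo, hi2;

        do {
                hi  = readl(hi_reg);
                lo  = readl(lo_reg);
                hi2 = readl(hi_reg);
        } while (hi != hi2);

        return ((u64)hi << 32) | lo;
}
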
@@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
 static unsigned long __hbird_read_stick(void)
 {
        unsigned long ret, tmp1, tmp2, tmp3;
-       unsigned long addr = HBIRD_STICK_ADDR;
+       unsigned long addr = HBIRD_STICK_ADDR+8;
 
-       __asm__ __volatile__("ldxa      [%1] %5, %2\n\t"
-                            "add       %1, 0x8, %1\n\t"
-                            "ldxa      [%1] %5, %3\n\t"
+       __asm__ __volatile__("ldxa      [%1] %5, %2\n"
+                            "1:\n\t"
                             "sub       %1, 0x8, %1\n\t"
+                            "ldxa      [%1] %5, %3\n\t"
+                            "add       %1, 0x8, %1\n\t"
                             "ldxa      [%1] %5, %4\n\t"
                             "cmp       %4, %2\n\t"
-                            "blu,a,pn  %%xcc, 1f\n\t"
-                            " add      %3, 1, %3\n"
-                            "1:\n\t"
-                            "sllx      %3, 32, %3\n\t"
+                            "bne,a,pn  %%xcc, 1b\n\t"
+                            " mov      %4, %2\n\t"
+                            "sllx      %4, 32, %4\n\t"
                             "or        %3, %4, %0\n\t"
                             : "=&r" (ret), "=&r" (addr),
                               "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7aa49b974dc5510cb17a46099186ea43335ab22b..df9d6e80c4f29f88fb8f96a542cdbe89c5e08021 100644
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)
 
        /* Setup paramaters for syncing RX/TX DMA descriptors */
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
-       dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+       dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
        return pci_module_init(&b44_driver);
 }
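
The one-line b44 change above exists because dma_desc_align_size and the sizeof() result have different types, and the kernel's max() warns when its two arguments' types differ. A small sketch of the max_t() idiom (names below are illustrative, not from b44.c):

#include <linux/kernel.h>

static unsigned int pick_sync_size(unsigned int align, size_t desc_size)
{
        /* max(align, desc_size) would trip the min()/max() type check;
         * max_t() casts both operands to the named type first. */
        return max_t(unsigned int, align, desc_size);
}
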
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 1f7ca453bb4a28c48a5e44da3024df4b8facdb5c..dde631f8f685fe6453b6738da508c0a162de3411 100644
@@ -1925,8 +1925,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
        u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 #endif
        if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
-                       cp->dev->name, status, compwb);
+               printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+                       cp->dev->name, status, (unsigned long long)compwb);
        /* process all the rings */
        for (ring = 0; ring < N_TX_RINGS; ring++) {
 #ifdef USE_TX_COMPWB
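
The cassini hunk above fixes a format-string mismatch: compwb is a u64, which is an unsigned long on some architectures and an unsigned long long on others, so the portable form is an explicit cast plus %llx. A minimal sketch:

#include <linux/kernel.h>
#include <linux/types.h>

static void print_completion_word(u64 compwb)
{
        printk(KERN_DEBUG "tx completion write-back: %llx\n",
               (unsigned long long)compwb);
}
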
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 4726722a063539a9736bf8305b8bb43603ca299e..bf1fd2b98bf897f5ea252caaa5856c25908ccc42 100644
@@ -1,25 +1,25 @@
 /*******************************************************************************
 
-  
+
   Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
-  
-  This program is free software; you can redistribute it and/or modify it 
-  under the terms of the GNU General Public License as published by the Free 
-  Software Foundation; either version 2 of the License, or (at your option) 
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
   any later version.
-  
-  This program is distributed in the hope that it will be useful, but WITHOUT 
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
-  
+
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc., 59 
+  this program; if not, write to the Free Software Foundation, Inc., 59
   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
-  
+
   The full GNU General Public License is included in this distribution in the
   file called LICENSE.
-  
+
   Contact Information:
   Linux NICS <linux.nics@intel.com>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 #define DRV_NAME               "e100"
 #define DRV_EXT                "-NAPI"
-#define DRV_VERSION            "3.4.14-k4"DRV_EXT
+#define DRV_VERSION            "3.5.10-k2"DRV_EXT
 #define DRV_DESCRIPTION                "Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT          "Copyright(c) 1999-2005 Intel Corporation"
 #define PFX                    DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
        cuc_dump_complete       = 0x0000A005,
        cuc_dump_reset_complete = 0x0000A007,
 };
-               
+
 enum port {
        software_reset  = 0x0000,
        selftest        = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
                ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
                writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);
-               
+
                writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);
-               
+
                /* Eeprom drives a dummy zero to EEDO after receiving
                 * complete address.  Use this to adjust addr_len. */
                ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
                        *addr_len -= (i - 16);
                        i = 17;
                }
-               
+
                data = (data << 1) | (ctrl & eedo ? 1 : 0);
        }
 
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
 }
 
-static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
 /* *INDENT-OFF* */
        static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *  driver can change the algorithm.
 *
 *  INTDELAY - This loads the dead-man timer with its inital value.
-*    When this timer expires the interrupt is asserted, and the 
+*    When this timer expires the interrupt is asserted, and the
 *    timer is reset each time a new packet is received.  (see
 *    BUNDLEMAX below to set the limit on number of chained packets)
 *    The current default is 0x600 or 1536.  Experiments show that
 *    the value should probably stay within the 0x200 - 0x1000.
 *
-*  BUNDLEMAX - 
+*  BUNDLEMAX -
 *    This sets the maximum number of frames that will be bundled.  In
 *    some situations, such as the TCP windowing algorithm, it may be
 *    better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *    an interrupt for every frame received.  If you do not want to put
 *    a limit on the bundle size, set this value to xFFFF.
 *
-*  BUNDLESMALL - 
+*  BUNDLESMALL -
 *    This contains a bit-mask describing the minimum size frame that
 *    will be bundled.  The default masks the lower 7 bits, which means
 *    that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *
 *    The current default is 0xFF80, which masks out the lower 7 bits.
 *    This means that any frame which is x7F (127) bytes or smaller
-*    will cause an immediate interrupt.  Because this value must be a 
+*    will cause an immediate interrupt.  Because this value must be a
 *    bit mask, there are only a few valid values that can be used.  To
 *    turn this feature off, the driver can write the value xFFFF to the
 *    lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *    standard Ethernet frames are <= 2047 bytes in length.
 *************************************************************************/
 
-/* if you wish to disable the ucode functionality, while maintaining the 
+/* if you wish to disable the ucode functionality, while maintaining the
  * workarounds it provides, set the following defines to:
  * BUNDLESMALL 0
  * BUNDLEMAX 1
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 
                for (i = 0; i < UCODE_SIZE; i++)
                        cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-               cb->command = cpu_to_le16(cb_ucode);
+               cb->command = cpu_to_le16(cb_ucode | cb_el);
                return;
        }
 
 noloaducode:
-       cb->command = cpu_to_le16(cb_nop);
+       cb->command = cpu_to_le16(cb_nop | cb_el);
+}
+
+static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
+       void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+       int err = 0, counter = 50;
+       struct cb *cb = nic->cb_to_clean;
+
+       if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
+               DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+
+       /* must restart cuc */
+       nic->cuc_cmd = cuc_start;
+
+       /* wait for completion */
+       e100_write_flush(nic);
+       udelay(10);
+
+       /* wait for possibly (ouch) 500ms */
+       while (!(cb->status & cpu_to_le16(cb_complete))) {
+               msleep(10);
+               if (!--counter) break;
+       }
+
+       /* ack any interupts, something could have been set */
+       writeb(~0, &nic->csr->scb.stat_ack);
+
+       /* if the command failed, or is not OK, notify and return */
+       if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
+               DPRINTK(PROBE,ERR, "ucode load failed\n");
+               err = -EPERM;
+       }
+
+       return err;
 }
 
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
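
e100_exec_cb_wait(), added above, issues the ucode command block and then polls its status for completion, sleeping 10 ms between polls and giving up after roughly 500 ms. A generic sketch of that bounded-wait pattern (not e100-specific; the helper and its arguments are illustrative):

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int wait_for_flag(volatile u16 *status, u16 done_bit, int tries)
{
        while (!(*status & done_bit)) {
                if (!--tries)
                        return -ETIMEDOUT;      /* gave up waiting */
                msleep(10);                     /* sleep between polls */
        }
        return 0;
}
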
@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
                mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
        }
 
-       if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 
+       if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
           (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
                /* enable/disable MDI/MDI-X auto-switching.
                   MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
                if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
-                  (nic->mac == mac_82551_10) || (nic->mii.force_media) || 
-                  !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) 
+                  (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
+                  !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
                        mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
                else
                        mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
                return err;
        if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
                return err;
-       if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+       if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
                return err;
        if((err = e100_exec_cb(nic, NULL, e100_configure)))
                return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
                }
        }
 
-       
+
        if(e100_exec_cmd(nic, cuc_dump_reset, 0))
                DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
        mii_check_link(&nic->mii);
 
        /* Software generated interrupt to recover from (rare) Rx
-       * allocation failure.
-       * Unfortunately have to use a spinlock to not re-enable interrupts
-       * accidentally, due to hardware that shares a register between the
-       * interrupt mask bit and the SW Interrupt generation bit */
+        * allocation failure.
+        * Unfortunately have to use a spinlock to not re-enable interrupts
+        * accidentally, due to hardware that shares a register between the
+        * interrupt mask bit and the SW Interrupt generation bit */
        spin_lock_irq(&nic->cmd_lock);
        writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
        spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
        struct rx *rx_to_start = NULL;
 
        /* are we already rnr? then pay attention!!! this ensures that
-        * the state machine progression never allows a start with a 
+        * the state machine progression never allows a start with a
         * partially cleaned list, avoiding a race between hardware
         * and rx_to_clean when in NAPI mode */
        if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
 {
        struct nic *nic = netdev_priv(netdev);
 
-       /* Reset outside of interrupt context, to avoid request_irq 
+       /* Reset outside of interrupt context, to avoid request_irq
         * in interrupt context */
        schedule_work(&nic->tx_timeout_task);
 }
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
        struct param_range *rfds = &nic->params.rfds;
        struct param_range *cbs = &nic->params.cbs;
 
-       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 
+       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
        if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
                nic->flags |= wol_magic;
 
        /* ack any pending wake events, disable PME */
-       pci_enable_wake(pdev, 0, 0);
+       err = pci_enable_wake(pdev, 0, 0);
+       if (err)
+               DPRINTK(PROBE, ERR, "Error clearing wake event\n");
 
        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
+       int retval;
 
        if(netif_running(netdev))
                e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
        netif_device_detach(netdev);
 
        pci_save_state(pdev);
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
+       retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
+                                nic->flags & (wol_magic | e100_asf(nic)));
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error enabling wake\n");
        pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);
 
        return 0;
 }
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
+       int retval;
 
-       pci_set_power_state(pdev, PCI_D0);
+       retval = pci_set_power_state(pdev, PCI_D0);
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error waking adapter\n");
        pci_restore_state(pdev);
        /* ack any pending wake events, disable PME */
-       pci_enable_wake(pdev, 0, 0);
+       retval = pci_enable_wake(pdev, 0, 0);
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error clearing wake events\n");
        if(e100_hw_init(nic))
                DPRINTK(HW, ERR, "e100_hw_init failed\n");
 
@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
+       int retval;
 
 #ifdef CONFIG_PM
-       pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+       retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
 #else
-       pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+       retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
 #endif
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error enabling wake\n");
 }
 
 
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
        .suspend =      e100_suspend,
        .resume =       e100_resume,
 #endif
-       .shutdown =     e100_shutdown,
+       .shutdown =     e100_shutdown,
 };
 
 static int __init e100_init_module(void)
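
Several e100 hunks above share one theme: the return values of pci_enable_wake() and pci_set_power_state() are now captured and logged instead of being ignored. A minimal sketch of that pattern (the helper below is illustrative; the PCI calls are the regular kernel API):

#include <linux/pci.h>

static void example_clear_wake(struct pci_dev *pdev)
{
        int err = pci_enable_wake(pdev, PCI_D0, 0);     /* ack wake events, disable PME */

        if (err)
                dev_err(&pdev->dev, "clearing wake events failed: %d\n", err);
}
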
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e02e9ba2e18b0b66166f2f877a2e282f0a4926f8..27c77306193b2cb14e17589455ec35ca6d2e4576 100644
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif
 
 #define BAR_0          0
 #define BAR_1          1
 struct e1000_adapter;
 
 #include "e1000_hw.h"
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
 
 #ifdef DBG
 #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
        uint16_t next_to_watch;
 };
 
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+       uint64_t packets;
+       uint64_t bytes;
+};
+#endif
+
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
 struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
 
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
        spinlock_t tx_lock;
        uint16_t tdh;
        uint16_t tdt;
-       uint64_t pkt;
 
        boolean_t last_tx_tso;
 
+#ifdef CONFIG_E1000_MQ
+       struct e1000_queue_stats tx_stats;
+#endif
 };
 
 struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
        struct e1000_ps_page *ps_page;
        struct e1000_ps_page_dma *ps_page_dma;
 
+       struct sk_buff *rx_skb_top;
+       struct sk_buff *rx_skb_prev;
+
+       /* cpu for rx queue */
+       int cpu;
+
        uint16_t rdh;
        uint16_t rdt;
-       uint64_t pkt;
+#ifdef CONFIG_E1000_MQ
+       struct e1000_queue_stats rx_stats;
+#endif
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
        uint16_t link_speed;
        uint16_t link_duplex;
        spinlock_t stats_lock;
+#ifdef CONFIG_E1000_NAPI
+       spinlock_t tx_queue_lock;
+#endif
        atomic_t irq_sem;
        struct work_struct tx_timeout_task;
        struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
        struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
 #endif
+       unsigned long tx_queue_len;
        uint32_t txd_cmd;
        uint32_t tx_int_delay;
        uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
        uint64_t gotcl_old;
        uint64_t tpt_old;
        uint64_t colc_old;
+       uint32_t tx_timeout_count;
        uint32_t tx_fifo_head;
        uint32_t tx_head_addr;
        uint32_t tx_fifo_size;
+       uint8_t  tx_timeout_factor;
        atomic_t tx_fifo_stall;
        boolean_t pcix_82544;
        boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
        /* RX */
 #ifdef CONFIG_E1000_NAPI
        boolean_t (*clean_rx) (struct e1000_adapter *adapter,
-                              struct e1000_rx_ring *rx_ring,
-                              int *work_done, int work_to_do);
+                                                  struct e1000_rx_ring *rx_ring,
+                                                  int *work_done, int work_to_do);
 #else
        boolean_t (*clean_rx) (struct e1000_adapter *adapter,
-                              struct e1000_rx_ring *rx_ring);
+                                                  struct e1000_rx_ring *rx_ring);
 #endif
        void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-                             struct e1000_rx_ring *rx_ring);
+                                                 struct e1000_rx_ring *rx_ring,
+                                                 int cleaned_count);
        struct e1000_rx_ring *rx_ring;      /* One per active queue */
 #ifdef CONFIG_E1000_NAPI
        struct net_device *polling_netdev;  /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
        struct net_device **cpu_netdev;     /* per-cpu */
        struct call_async_data_struct rx_sched_call_data;
-       int cpu_for_queue[4];
+       cpumask_t cpumask;
 #endif
-       int num_queues;
+       int num_tx_queues;
+       int num_rx_queues;
 
        uint64_t hw_csum_err;
        uint64_t hw_csum_good;
        uint64_t rx_hdr_split;
+       uint32_t alloc_rx_buff_failed;
        uint32_t rx_int_delay;
        uint32_t rx_abs_int_delay;
        boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
        struct e1000_rx_ring test_rx_ring;
 
 
+       u32 *config_space;
        int msg_enable;
 #ifdef CONFIG_PCI_MSI
        boolean_t have_msi;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c88f1a3c1b1db7d58a87a779d6517480e514d31c..d252297e4db0826a676baf48a4833b55f9b7d5ad 100644
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "tx_deferred_ok", E1000_STAT(stats.dc) },
        { "tx_single_coll_ok", E1000_STAT(stats.scc) },
        { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+       { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
        { "rx_long_length_errors", E1000_STAT(stats.roc) },
        { "rx_short_length_errors", E1000_STAT(stats.ruc) },
        { "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
        { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
        { "rx_header_split", E1000_STAT(rx_hdr_split) },
+       { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
 };
-#define E1000_STATS_LEN        \
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+       (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
+        ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
+       * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
+#define E1000_GLOBAL_STATS_LEN \
        sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
 static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)", "Eeprom test    (offline)",
        "Interrupt test (offline)", "Loopback test  (offline)",
@@ -183,7 +195,15 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if(ecmd->autoneg == AUTONEG_ENABLE) {
+       /* When SoL/IDER sessions are active, autoneg/speed/duplex
+        * cannot be changed */
+       if (e1000_check_phy_reset_block(hw)) {
+               DPRINTK(DRV, ERR, "Cannot change link characteristics "
+                       "when SoL/IDER is active.\n");
+               return -EINVAL;
+       }
+
+       if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->autoneg = 1;
                if(hw->media_type == e1000_media_type_fiber)
                        hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
@@ -567,21 +587,21 @@ e1000_get_drvinfo(struct net_device *netdev,
 
        strncpy(drvinfo->driver,  e1000_driver_name, 32);
        strncpy(drvinfo->version, e1000_driver_version, 32);
-       
-       /* EEPROM image version # is reported as firware version # for
+
+       /* EEPROM image version # is reported as firmware version # for
         * 8257{1|2|3} controllers */
        e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
        switch (adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
        case e1000_82573:
-               sprintf(firmware_version, "%d.%d-%d", 
+               sprintf(firmware_version, "%d.%d-%d",
                        (eeprom_data & 0xF000) >> 12,
                        (eeprom_data & 0x0FF0) >> 4,
                        eeprom_data & 0x000F);
                break;
        default:
-               sprintf(firmware_version, "n/a");
+               sprintf(firmware_version, "N/A");
        }
 
        strncpy(drvinfo->fw_version, firmware_version, 32);
@@ -623,8 +643,8 @@ e1000_set_ringparam(struct net_device *netdev,
        struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
        int i, err, tx_ring_size, rx_ring_size;
 
-       tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
-       rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+       tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+       rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
 
        if (netif_running(adapter->netdev))
                e1000_down(adapter);
@@ -663,10 +683,10 @@ e1000_set_ringparam(struct net_device *netdev,
                E1000_MAX_TXD : E1000_MAX_82544_TXD));
        E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 
 
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_tx_queues; i++)
                txdr[i].count = txdr->count;
+       for (i = 0; i < adapter->num_rx_queues; i++)
                rxdr[i].count = rxdr->count;
-       }
 
        if(netif_running(adapter->netdev)) {
                /* Try to get new resources before deleting old */
@@ -979,18 +999,17 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
                }
        }
 
-       if(txdr->desc) {
+       if (txdr->desc) {
                pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
                txdr->desc = NULL;
        }
-       if(rxdr->desc) {
+       if (rxdr->desc) {
                pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
                rxdr->desc = NULL;
        }
 
        kfree(txdr->buffer_info);
        txdr->buffer_info = NULL;
-
        kfree(rxdr->buffer_info);
        rxdr->buffer_info = NULL;
 
@@ -1327,11 +1346,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
-       uint32_t rctl;
        struct e1000_hw *hw = &adapter->hw;
+       uint32_t rctl;
 
        if (hw->media_type == e1000_media_type_fiber ||
-          hw->media_type == e1000_media_type_internal_serdes) {
+           hw->media_type == e1000_media_type_internal_serdes) {
                switch (hw->mac_type) {
                case e1000_82545:
                case e1000_82546:
@@ -1362,25 +1381,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
 static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
+       struct e1000_hw *hw = &adapter->hw;
        uint32_t rctl;
        uint16_t phy_reg;
-       struct e1000_hw *hw = &adapter->hw;
 
-       rctl = E1000_READ_REG(&adapter->hw, RCTL);
+       rctl = E1000_READ_REG(hw, RCTL);
        rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-       E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+       E1000_WRITE_REG(hw, RCTL, rctl);
 
        switch (hw->mac_type) {
        case e1000_82571:
        case e1000_82572:
                if (hw->media_type == e1000_media_type_fiber ||
-                  hw->media_type == e1000_media_type_internal_serdes){
+                   hw->media_type == e1000_media_type_internal_serdes) {
 #define E1000_SERDES_LB_OFF 0x400
                        E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
                        msec_delay(10);
                        break;
                }
-               /* fall thru for Cu adapters */
+               /* Fall Through */
        case e1000_82545:
        case e1000_82546:
        case e1000_82545_rev_3:
@@ -1401,7 +1420,7 @@ static void
 e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
        memset(skb->data, 0xFF, frame_size);
-       frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+       frame_size &= ~1;
        memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
        memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
        memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,7 +1429,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-       frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+       frame_size &= ~1;
        if(*(skb->data + 3) == 0xFF) {
                if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
                   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
@@ -1488,14 +1507,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 static int
 e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
 {
-       if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
-       if((*data = e1000_setup_loopback_test(adapter)))
-               goto err_loopback_setup;
+       /* PHY loopback cannot be performed if SoL/IDER
+        * sessions are active */
+       if (e1000_check_phy_reset_block(&adapter->hw)) {
+               DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+                       "when SoL/IDER is active.\n");
+               *data = 0;
+               goto out;
+       }
+
+       if ((*data = e1000_setup_desc_rings(adapter)))
+               goto out;
+       if ((*data = e1000_setup_loopback_test(adapter)))
+               goto err_loopback;
        *data = e1000_run_loopback_test(adapter);
        e1000_loopback_cleanup(adapter);
-err_loopback_setup:
-       e1000_free_desc_rings(adapter);
+
 err_loopback:
+       e1000_free_desc_rings(adapter);
+out:
        return *data;
 }
 
@@ -1617,6 +1647,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
+       case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber */
                if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
                        wol->supported = 0;
@@ -1660,6 +1691,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
+       case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber */
                if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
                        return wol->wolopts ? -EOPNOTSUPP : 0;
@@ -1721,21 +1753,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
                mod_timer(&adapter->blink_timer, jiffies);
                msleep_interruptible(data * 1000);
                del_timer_sync(&adapter->blink_timer);
-       }
-       else if(adapter->hw.mac_type < e1000_82573) {
-               E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
-                       E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
-                       (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
+       } else if (adapter->hw.mac_type < e1000_82573) {
+               E1000_WRITE_REG(&adapter->hw, LEDCTL,
+                       (E1000_LEDCTL_LED2_BLINK_RATE |
+                        E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
+                        (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
                msleep_interruptible(data * 1000);
-       }
-       else {
-               E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
-                       E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | 
-                       (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
+       } else {
+               E1000_WRITE_REG(&adapter->hw, LEDCTL,
+                       (E1000_LEDCTL_LED2_BLINK_RATE |
+                        E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
+                        (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
                msleep_interruptible(data * 1000);
        }
 
@@ -1768,19 +1800,43 @@ e1000_get_ethtool_stats(struct net_device *netdev,
                struct ethtool_stats *stats, uint64_t *data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+       uint64_t *queue_stat;
+       int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
+       int j, k;
+#endif
        int i;
 
        e1000_update_stats(adapter);
-       for(i = 0; i < E1000_STATS_LEN; i++) {
-               char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;  
-               data[i] = (e1000_gstrings_stats[i].sizeof_stat == 
+       for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+               char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+               data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
                        sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
        }
+#ifdef CONFIG_E1000_MQ
+       for (j = 0; j < adapter->num_tx_queues; j++) {
+               queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               i += k;
+       }
+       for (j = 0; j < adapter->num_rx_queues; j++) {
+               queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               i += k;
+       }
+#endif
+/*     BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void 
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
+#ifdef CONFIG_E1000_MQ
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+       uint8_t *p = data;
        int i;
 
        switch(stringset) {
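
With CONFIG_E1000_MQ, e1000_get_ethtool_stats() above now fills data[] with the E1000_GLOBAL_STATS_LEN global counters first, then the per-Tx-queue counters, then the per-Rx-queue counters. A worked example with hypothetical queue counts:

/* Assume 2 Tx and 2 Rx queues; struct e1000_queue_stats holds 2 uint64_t
 * counters, so stat_count = 2 and
 *   E1000_QUEUE_STATS_LEN = (2 + 2) * 2 = 8
 *   E1000_STATS_LEN       = E1000_GLOBAL_STATS_LEN + 8
 * data[] layout: [global stats][tx0 pkts, tx0 bytes][tx1 ...][rx0 ...][rx1 ...]
 */
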
@@ -1789,11 +1845,26 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
                        E1000_TEST_LEN*ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
-               for (i=0; i < E1000_STATS_LEN; i++) {
-                       memcpy(data + i * ETH_GSTRING_LEN, 
-                       e1000_gstrings_stats[i].stat_string,
-                       ETH_GSTRING_LEN);
+               for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+                       memcpy(p, e1000_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+#ifdef CONFIG_E1000_MQ
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+                       sprintf(p, "tx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
                }
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       sprintf(p, "rx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+               }
+#endif
+/*             BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
                break;
        }
 }
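
e1000_get_strings() above emits names in the same order, so the extra strings line up with the extra values. Continuing the hypothetical 2+2 queue example:

/* Strings appended after the E1000_GLOBAL_STATS_LEN global names:
 *   tx_queue_0_packets, tx_queue_0_bytes,
 *   tx_queue_1_packets, tx_queue_1_bytes,
 *   rx_queue_0_packets, rx_queue_0_bytes,
 *   rx_queue_1_packets, rx_queue_1_bytes
 * which is exactly the order e1000_get_ethtool_stats() writes the values.
 */
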
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 136fc031e4ad555d168a8b0ac002b460ebea8ebb..2437d362ff636f372b2f03ddbcb0afe0fa2965d9 100644
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
     case E1000_DEV_ID_82546GB_FIBER:
     case E1000_DEV_ID_82546GB_SERDES:
     case E1000_DEV_ID_82546GB_PCIE:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
         hw->mac_type = e1000_82546_rev_3;
         break;
     case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
     uint16_t cmd_mmrbc;
     uint16_t stat_mmrbc;
     uint32_t mta_size;
+    uint32_t ctrl_ext;
 
     DEBUGFUNC("e1000_init_hw");
 
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
             break;
         case e1000_82571:
         case e1000_82572:
-            ctrl |= (1 << 22);
         case e1000_82573:
             ctrl |= E1000_TXDCTL_COUNT_DESC;
             break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
      */
     e1000_clear_hw_cntrs(hw);
 
+    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        /* Relaxed ordering must be disabled to avoid a parity
+         * error crash in a PCI slot. */
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
     return ret_val;
 }
 
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_setup_link");
 
+    /* In the case of the phy reset being blocked, we already have a link.
+     * We do not have to set it up again. */
+    if (e1000_check_phy_reset_block(hw))
+        return E1000_SUCCESS;
+
     /* Read and store word 0x0F of the EEPROM. This word contains bits
      * that determine the hardware's default PAUSE (flow control) mode,
      * a bit that determines whether the HW defaults to enabling or
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 void
 e1000_config_collision_dist(struct e1000_hw *hw)
 {
-    uint32_t tctl;
+    uint32_t tctl, coll_dist;
 
     DEBUGFUNC("e1000_config_collision_dist");
 
+    if (hw->mac_type < e1000_82543)
+        coll_dist = E1000_COLLISION_DISTANCE_82542;
+    else
+        coll_dist = E1000_COLLISION_DISTANCE;
+
     tctl = E1000_READ_REG(hw, TCTL);
 
     tctl &= ~E1000_TCTL_COLD;
-    tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+    tctl |= coll_dist << E1000_COLD_SHIFT;
 
     E1000_WRITE_REG(hw, TCTL, tctl);
     E1000_WRITE_FLUSH(hw);
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
         
         if (hw->mac_type < e1000_82571) 
             msec_delay(10);
+        else
+            udelay(100);
         
         E1000_WRITE_REG(hw, CTRL, ctrl);
         E1000_WRITE_FLUSH(hw);
@@ -3881,14 +3904,16 @@ e1000_read_eeprom(struct e1000_hw *hw,
         return -E1000_ERR_EEPROM;
     }
 
-    /* FLASH reads without acquiring the semaphore are safe in 82573-based
-     * controllers.
-     */
-    if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
-        (hw->mac_type != e1000_82573)) {
-        /* Prepare the EEPROM for reading  */
-        if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-            return -E1000_ERR_EEPROM;
+    /* FLASH reads without acquiring the semaphore are safe */
+    if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+    hw->eeprom.use_eerd == FALSE) {
+        switch (hw->mac_type) {
+        default:
+            /* Prepare the EEPROM for reading  */
+            if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+                return -E1000_ERR_EEPROM;
+            break;
+        }
     }
 
     if(eeprom->use_eerd == TRUE) {
@@ -6720,6 +6745,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
         break;
     }
 
+    /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
+     * Need to wait for PHY configuration completion before accessing NVM
+     * and PHY. */
+    if (hw->mac_type == e1000_82573)
+        msec_delay(25);
+
     return E1000_SUCCESS;
 }
 
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 7caa35748ceac9c8236d1510f9335b2e57f9073b..0b8f6f2b774b3e51f090b3fa3d51bf1a15428765 100644
@@ -439,6 +439,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82546GB_FIBER       0x107A
 #define E1000_DEV_ID_82546GB_SERDES      0x107B
 #define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82571EB_COPPER      0x105E
 #define E1000_DEV_ID_82571EB_FIBER       0x105F
@@ -449,6 +450,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82573E              0x108B
 #define E1000_DEV_ID_82573E_IAMT         0x108C
 #define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
 
 
 #define NODE_ADDRESS_SIZE 6
@@ -1497,6 +1499,7 @@ struct e1000_hw {
 #define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
 #define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
 #define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
 #define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
@@ -1954,6 +1957,23 @@ struct e1000_host_command_info {
 
 #define E1000_MDALIGN          4096
 
+/* PCI-Ex registers */
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP                 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP              0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP              0x00000004
+#define E1000_GCR_TXD_NO_SNOOP                 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP              0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP              0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP            | \
+                                                        E1000_GCR_RXDSCW_NO_SNOOP      | \
+                                                        E1000_GCR_RXDSCR_NO_SNOOP      | \
+                                                        E1000_GCR_TXD_NO_SNOOP         | \
+                                                        E1000_GCR_TXDSCW_NO_SNOOP      | \
+                                                        E1000_GCR_TXDSCR_NO_SNOOP)
+
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
 /* Function Active and Power State to MNG */
 #define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
@@ -2077,7 +2097,10 @@ struct e1000_host_command_info {
 /* Collision related configuration parameters */
 #define E1000_COLLISION_THRESHOLD       15
 #define E1000_CT_SHIFT                  4
-#define E1000_COLLISION_DISTANCE        64
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
 #define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
 #define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
 #define E1000_COLD_SHIFT                12
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55df4c6dd57e00d9480f35c4e9fb461..d0a5d1656c5facad671562dfd5e88a2b541bdc01 100644
@@ -43,7 +43,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -97,7 +97,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x108B),
        INTEL_E1000_ETHERNET_DEVICE(0x108C),
+       INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x109A),
+       INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        /* required last entry */
        {0,}
 };
@@ -171,9 +173,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                        struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                                   struct e1000_rx_ring *rx_ring);
+                                   struct e1000_rx_ring *rx_ring,
+                                  int cleaned_count);
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                                      struct e1000_rx_ring *rx_ring);
+                                      struct e1000_rx_ring *rx_ring,
+                                     int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
@@ -319,7 +323,75 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
                }
        }
 }
-       
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573) i
+ * of the f/w this means that the netowrk i/f is closed.
+ * 
+ **/
+
+static inline void 
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+       uint32_t ctrl_ext;
+       uint32_t swsm;
+
+       /* Let firmware taken over control of h/w */
+       switch (adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+               break;
+       case e1000_82573:
+               swsm = E1000_READ_REG(&adapter->hw, SWSM);
+               E1000_WRITE_REG(&adapter->hw, SWSM,
+                               swsm & ~E1000_SWSM_DRV_LOAD);
+       default:
+               break;
+       }
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that 
+ * the driver is loaded. For AMT version (only with 82573) 
+ * of the f/w this means that the netowrk i/f is open.
+ * 
+ **/
+
+static inline void 
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+       uint32_t ctrl_ext;
+       uint32_t swsm;
+       /* Let firmware know the driver has taken over */
+       switch (adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+               break;
+       case e1000_82573:
+               swsm = E1000_READ_REG(&adapter->hw, SWSM);
+               E1000_WRITE_REG(&adapter->hw, SWSM,
+                               swsm | E1000_SWSM_DRV_LOAD);
+               break;
+       default:
+               break;
+       }
+}
+
 int
 e1000_up(struct e1000_adapter *adapter)
 {
@@ -343,8 +415,14 @@ e1000_up(struct e1000_adapter *adapter)
        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
-       for (i = 0; i < adapter->num_queues; i++)
-               adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
+       /* call E1000_DESC_UNUSED which always leaves
+        * at least 1 descriptor unused to make sure
+        * next_to_use != next_to_clean */
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+               adapter->alloc_rx_buf(adapter, ring,
+                                     E1000_DESC_UNUSED(ring));
+       }
 
 #ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -364,6 +442,12 @@ e1000_up(struct e1000_adapter *adapter)
                return err;
        }
 
+#ifdef CONFIG_E1000_MQ
+       e1000_setup_queue_mapping(adapter);
+#endif
+
+       adapter->tx_queue_len = netdev->tx_queue_len;
+
        mod_timer(&adapter->watchdog_timer, jiffies);
 
 #ifdef CONFIG_E1000_NAPI
@@ -378,6 +462,8 @@ void
 e1000_down(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+                                    e1000_check_mng_mode(&adapter->hw);
 
        e1000_irq_disable(adapter);
 #ifdef CONFIG_E1000_MQ
@@ -396,6 +482,7 @@ e1000_down(struct e1000_adapter *adapter)
 #ifdef CONFIG_E1000_NAPI
        netif_poll_disable(netdev);
 #endif
+       netdev->tx_queue_len = adapter->tx_queue_len;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
@@ -405,12 +492,16 @@ e1000_down(struct e1000_adapter *adapter)
        e1000_clean_all_tx_rings(adapter);
        e1000_clean_all_rx_rings(adapter);
 
-       /* If WoL is not enabled and management mode is not IAMT
-        * Power down the PHY so no link is implied when interface is down */
-       if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+       /* Power down the PHY so no link is implied when interface is down *
+        * The PHY cannot be powered down if any of the following is TRUE *
+        * (a) WoL is enabled
+        * (b) AMT is active
+        * (c) SoL/IDER session is active */
+       if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper &&
-          !e1000_check_mng_mode(&adapter->hw) &&
-          !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
+          !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+          !mng_mode_enabled &&
+          !e1000_check_phy_reset_block(&adapter->hw)) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +513,8 @@ e1000_down(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
        uint32_t pba, manc;
        uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-       uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
 
        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
@@ -449,15 +538,8 @@ e1000_reset(struct e1000_adapter *adapter)
        }
 
        if((adapter->hw.mac_type != e1000_82573) &&
-          (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
+          (adapter->netdev->mtu > E1000_RXBUFFER_8192))
                pba -= 8; /* allocate more FIFO for Tx */
-               /* send an XOFF when there is enough space in the
-                * Rx FIFO to hold one extra full size Rx packet 
-               */
-               fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE + 
-                                       ETHERNET_FCS_SIZE + 1;
-               fc_low_water_mark = fc_high_water_mark + 8;
-       }
 
 
        if(adapter->hw.mac_type == e1000_82547) {
@@ -471,10 +553,12 @@ e1000_reset(struct e1000_adapter *adapter)
        E1000_WRITE_REG(&adapter->hw, PBA, pba);
 
        /* flow control settings */
-       adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
-                                   fc_high_water_mark;
-       adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
-                                  fc_low_water_mark;
+       /* Set the FC high water mark to 90% of the FIFO size.
+        * Required to clear last 3 LSB */
+       fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+
+       adapter->hw.fc_high_water = fc_high_water_mark;
+       adapter->hw.fc_low_water = fc_high_water_mark - 8;
        adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
        adapter->hw.fc_send_xon = 1;
        adapter->hw.fc = adapter->hw.original_fc;
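
Since pba counts packet-buffer space in kilobytes, (pba * 9216) / 10 is 90% of that space in bytes (9216 = 9 * 1024), and the & 0xFFF8 mask clears the three low bits the watermark registers require to be zero. For example, a 48 KB Rx allocation gives 48 * 9216 / 10 = 44236, masked to 44232 for fc_high_water and 44224 for fc_low_water. A standalone check of that arithmetic (the pba value is only an example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pba = 48;                              /* Rx FIFO share in KB (example) */
            uint32_t hi  = ((pba * 9216) / 10) & 0xFFF8;    /* 90% of the FIFO, 8-byte aligned */
            uint32_t lo  = hi - 8;

            printf("fc_high_water=%u fc_low_water=%u\n", hi, lo);  /* 44232 / 44224 */
            return 0;
    }
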
@@ -517,8 +601,6 @@ e1000_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        unsigned long mmio_start, mmio_len;
-       uint32_t ctrl_ext;
-       uint32_t swsm;
 
        static int cards_found = 0;
        int i, err, pci_using_dac;
@@ -712,8 +794,7 @@ e1000_probe(struct pci_dev *pdev,
        case e1000_82546:
        case e1000_82546_rev_3:
        case e1000_82571:
-               if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
-                  && (adapter->hw.media_type == e1000_media_type_copper)) {
+               if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
                        e1000_read_eeprom(&adapter->hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
@@ -727,25 +808,36 @@ e1000_probe(struct pci_dev *pdev,
        if(eeprom_data & eeprom_apme_mask)
                adapter->wol |= E1000_WUFC_MAG;
 
+       /* print bus type/speed/width info */
+       {
+       struct e1000_hw *hw = &adapter->hw;
+       DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+                (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+               ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+                (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+                (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+                (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+                (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+               ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+                (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+                (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+                "32-bit"));
+       }
+
+       for (i = 0; i < 6; i++)
+               printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
        /* reset the hardware with the new settings */
        e1000_reset(adapter);
 
-       /* Let firmware know the driver has taken over */
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm | E1000_SWSM_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
+       /* If the controller is 82573 and f/w is AMT, do not set
+        * DRV_LOAD until the interface is up.  For all other cases,
+        * let the f/w know that the h/w is now under the control
+        * of the driver. */
+       if (adapter->hw.mac_type != e1000_82573 ||
+           !e1000_check_mng_mode(&adapter->hw))
+               e1000_get_hw_control(adapter);
 
        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev)))
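
The CTRL_EXT/SWSM switch deleted above is what the new e1000_get_hw_control() call centralizes: 82571/82572 advertise driver ownership through E1000_CTRL_EXT_DRV_LOAD, 82573 through E1000_SWSM_DRV_LOAD, and e1000_release_hw_control() clears the same bit again. The helper itself is not shown in this diff; a plausible body, reconstructed from the removed lines, would be:

    /* Sketch only -- mirrors the register writes removed from e1000_probe(). */
    static void e1000_get_hw_control(struct e1000_adapter *adapter)
    {
            uint32_t ctrl_ext, swsm;

            switch (adapter->hw.mac_type) {
            case e1000_82571:
            case e1000_82572:
                    ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                    E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
                                    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
                    break;
            case e1000_82573:
                    swsm = E1000_READ_REG(&adapter->hw, SWSM);
                    E1000_WRITE_REG(&adapter->hw, SWSM,
                                    swsm | E1000_SWSM_DRV_LOAD);
                    break;
            default:
                    break;
            }
    }
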
@@ -782,8 +874,7 @@ e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t ctrl_ext;
-       uint32_t manc, swsm;
+       uint32_t manc;
 #ifdef CONFIG_E1000_NAPI
        int i;
 #endif
@@ -799,26 +890,13 @@ e1000_remove(struct pci_dev *pdev)
                }
        }
 
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm & ~E1000_SWSM_DRV_LOAD);
-               break;
-
-       default:
-               break;
-       }
+       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
+        * would have already happened in close and is redundant. */
+       e1000_release_hw_control(adapter);
 
        unregister_netdev(netdev);
 #ifdef CONFIG_E1000_NAPI
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_rx_queues; i++)
                __dev_put(&adapter->polling_netdev[i]);
 #endif
 
@@ -923,15 +1001,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
        switch (hw->mac_type) {
        case e1000_82571:
        case e1000_82572:
-               adapter->num_queues = 2;
+               /* These controllers support 2 tx queues, but with a single
+                * qdisc implementation, multiple tx queues aren't quite as
+                * interesting.  If we can find a logical way of mapping
+                * flows to a queue, then perhaps we can up the num_tx_queue
+                * count back to its default.  Until then, we run the risk of
+                * terrible performance due to SACK overload. */
+               adapter->num_tx_queues = 1;
+               adapter->num_rx_queues = 2;
                break;
        default:
-               adapter->num_queues = 1;
+               adapter->num_tx_queues = 1;
+               adapter->num_rx_queues = 1;
                break;
        }
-       adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+       adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+       adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+       DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
+               adapter->num_rx_queues,
+               ((adapter->num_rx_queues == 1)
+                ? ((num_online_cpus() > 1)
+                       ? "(due to unsupported feature in current adapter)"
+                       : "(due to unsupported system configuration)")
+                : ""));
+       DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
+               adapter->num_tx_queues);
 #else
-       adapter->num_queues = 1;
+       adapter->num_tx_queues = 1;
+       adapter->num_rx_queues = 1;
 #endif
 
        if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1037,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
        }
 
 #ifdef CONFIG_E1000_NAPI
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->polling_netdev[i].priv = adapter;
                adapter->polling_netdev[i].poll = &e1000_clean;
                adapter->polling_netdev[i].weight = 64;
                dev_hold(&adapter->polling_netdev[i]);
                set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
        }
-#endif
-
-#ifdef CONFIG_E1000_MQ
-       e1000_setup_queue_mapping(adapter);
+       spin_lock_init(&adapter->tx_queue_lock);
 #endif
 
        atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1067,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 {
        int size;
 
-       size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+       size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
        adapter->tx_ring = kmalloc(size, GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
        memset(adapter->tx_ring, 0, size);
 
-       size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+       size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
        adapter->rx_ring = kmalloc(size, GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
@@ -988,7 +1082,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
        memset(adapter->rx_ring, 0, size);
 
 #ifdef CONFIG_E1000_NAPI
-       size = sizeof(struct net_device) * adapter->num_queues;
+       size = sizeof(struct net_device) * adapter->num_rx_queues;
        adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
        if (!adapter->polling_netdev) {
                kfree(adapter->tx_ring);
@@ -998,6 +1092,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
        memset(adapter->polling_netdev, 0, size);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+       adapter->rx_sched_call_data.func = e1000_rx_schedule;
+       adapter->rx_sched_call_data.info = adapter->netdev;
+
+       adapter->cpu_netdev = alloc_percpu(struct net_device *);
+       adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
        return E1000_SUCCESS;
 }
 
@@ -1017,14 +1119,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
        lock_cpu_hotplug();
        i = 0;
        for_each_online_cpu(cpu) {
-               *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+               *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
                /* This is incomplete because we'd like to assign separate
                 * physical cpus to these netdev polling structures and
                 * avoid saturating a subset of cpus.
                 */
-               if (i < adapter->num_queues) {
+               if (i < adapter->num_rx_queues) {
                        *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
-                       adapter->cpu_for_queue[i] = cpu;
+                       adapter->rx_ring[i].cpu = cpu;
+                       cpu_set(cpu, adapter->cpumask);
                } else
                        *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
 
@@ -1071,6 +1174,12 @@ e1000_open(struct net_device *netdev)
                e1000_update_mng_vlan(adapter);
        }
 
+       /* If AMT is enabled, let the firmware know that the network
+        * interface is now open */
+       if (adapter->hw.mac_type == e1000_82573 &&
+           e1000_check_mng_mode(&adapter->hw))
+               e1000_get_hw_control(adapter);
+
        return E1000_SUCCESS;
 
 err_up:
@@ -1109,6 +1218,13 @@ e1000_close(struct net_device *netdev)
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }
+
+       /* If AMT is enabled, let the firmware know that the network
+        * interface is now closed */
+       if (adapter->hw.mac_type == e1000_82573 &&
+           e1000_check_mng_mode(&adapter->hw))
+               e1000_release_hw_control(adapter);
+
        return 0;
 }
 
@@ -1229,7 +1345,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 {
        int i, err = 0;
 
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_tx_queues; i++) {
                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                if (err) {
                        DPRINTK(PROBE, ERR,
@@ -1254,10 +1370,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
        uint64_t tdba;
        struct e1000_hw *hw = &adapter->hw;
        uint32_t tdlen, tctl, tipg, tarc;
+       uint32_t ipgr1, ipgr2;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
 
-       switch (adapter->num_queues) {
+       switch (adapter->num_tx_queues) {
        case 2:
                tdba = adapter->tx_ring[1].dma;
                tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1404,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 
        /* Set the default values for the Tx Inter Packet Gap timer */
 
+       if (hw->media_type == e1000_media_type_fiber ||
+           hw->media_type == e1000_media_type_internal_serdes)
+               tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+       else
+               tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
                tipg = DEFAULT_82542_TIPG_IPGT;
-               tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
-               tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+               ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+               ipgr2 = DEFAULT_82542_TIPG_IPGR2;
                break;
        default:
-               if (hw->media_type == e1000_media_type_fiber ||
-                   hw->media_type == e1000_media_type_internal_serdes)
-                       tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
-               else
-                       tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
-               tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
-               tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+               ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+               ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+               break;
        }
+       tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+       tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
        E1000_WRITE_REG(hw, TIPG, tipg);
 
        /* Set the Tx Interrupt Delay register */
@@ -1454,6 +1575,8 @@ setup_rx_desc_die:
 
        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;
+       rxdr->rx_skb_top = NULL;
+       rxdr->rx_skb_prev = NULL;
 
        return 0;
 }
@@ -1475,7 +1598,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 {
        int i, err = 0;
 
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_rx_queues; i++) {
                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
                if (err) {
                        DPRINTK(PROBE, ERR,
@@ -1510,7 +1633,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
                (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
-       if(adapter->hw.tbi_compatibility_on == 1)
+       if (adapter->hw.mac_type > e1000_82543)
+               rctl |= E1000_RCTL_SECRC;
+
+       if (adapter->hw.tbi_compatibility_on == 1)
                rctl |= E1000_RCTL_SBP;
        else
                rctl &= ~E1000_RCTL_SBP;
@@ -1638,16 +1764,21 @@ e1000_configure_rx(struct e1000_adapter *adapter)
        }
 
        if (hw->mac_type >= e1000_82571) {
-               /* Reset delay timers after every interrupt */
                ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+               /* Reset delay timers after every interrupt */
                ctrl_ext |= E1000_CTRL_EXT_CANC;
+#ifdef CONFIG_E1000_NAPI
+               /* Auto-Mask interrupts upon ICR read. */
+               ctrl_ext |= E1000_CTRL_EXT_IAME;
+#endif
                E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+               E1000_WRITE_REG(hw, IAM, ~0);
                E1000_WRITE_FLUSH(hw);
        }
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
-       switch (adapter->num_queues) {
+       switch (adapter->num_rx_queues) {
 #ifdef CONFIG_E1000_MQ
        case 2:
                rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1805,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
        }
 
 #ifdef CONFIG_E1000_MQ
-       if (adapter->num_queues > 1) {
+       if (adapter->num_rx_queues > 1) {
                uint32_t random[10];
 
                get_random_bytes(&random[0], 40);
@@ -1684,7 +1815,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                        E1000_WRITE_REG(hw, RSSIM, 0);
                }
 
-               switch (adapter->num_queues) {
+               switch (adapter->num_rx_queues) {
                case 2:
                default:
                        reta = 0x00800080;
@@ -1776,7 +1907,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_tx_queues; i++)
                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
@@ -1789,12 +1920,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                buffer_info->dma,
                                buffer_info->length,
                                PCI_DMA_TODEVICE);
-               buffer_info->dma = 0;
        }
-       if(buffer_info->skb) {
+       if (buffer_info->skb)
                dev_kfree_skb_any(buffer_info->skb);
-               buffer_info->skb = NULL;
-       }
+       memset(buffer_info, 0, sizeof(struct e1000_buffer));
 }
 
 /**
@@ -1843,7 +1972,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_tx_queues; i++)
                e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
@@ -1887,7 +2016,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_rx_queues; i++)
                e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
 }
 
@@ -1913,8 +2042,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
        for(i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if(buffer_info->skb) {
-                       ps_page = &rx_ring->ps_page[i];
-                       ps_page_dma = &rx_ring->ps_page_dma[i];
                        pci_unmap_single(pdev,
                                         buffer_info->dma,
                                         buffer_info->length,
@@ -1922,19 +2049,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
-
-                       for(j = 0; j < adapter->rx_ps_pages; j++) {
-                               if(!ps_page->ps_page[j]) break;
-                               pci_unmap_single(pdev,
-                                                ps_page_dma->ps_page_dma[j],
-                                                PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                               ps_page_dma->ps_page_dma[j] = 0;
-                               put_page(ps_page->ps_page[j]);
-                               ps_page->ps_page[j] = NULL;
-                       }
+               }
+               ps_page = &rx_ring->ps_page[i];
+               ps_page_dma = &rx_ring->ps_page_dma[i];
+               for (j = 0; j < adapter->rx_ps_pages; j++) {
+                       if (!ps_page->ps_page[j]) break;
+                       pci_unmap_page(pdev,
+                                      ps_page_dma->ps_page_dma[j],
+                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       ps_page_dma->ps_page_dma[j] = 0;
+                       put_page(ps_page->ps_page[j]);
+                       ps_page->ps_page[j] = NULL;
                }
        }
 
+       /* there also may be some cached data in our adapter */
+       if (rx_ring->rx_skb_top) {
+               dev_kfree_skb(rx_ring->rx_skb_top);
+
+               /* rx_skb_prev will be wiped out by rx_skb_top */
+               rx_ring->rx_skb_top = NULL;
+               rx_ring->rx_skb_prev = NULL;
+       }
+
+
        size = sizeof(struct e1000_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);
        size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2101,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_rx_queues; i++)
                e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
@@ -2005,7 +2143,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 
        if(netif_running(netdev)) {
                e1000_configure_rx(adapter);
-               e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
+               /* No need to loop, because 82542 supports only 1 queue */
+               struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+               adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
        }
 }
 
@@ -2204,7 +2344,7 @@ static void
 e1000_watchdog_task(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
+       struct e1000_tx_ring *txdr = adapter->tx_ring;
        uint32_t link;
 
        e1000_check_for_link(&adapter->hw);
@@ -2231,6 +2371,21 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
                               adapter->link_duplex == FULL_DUPLEX ?
                               "Full Duplex" : "Half Duplex");
 
+                       /* tweak tx_queue_len according to speed/duplex */
+                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       adapter->tx_timeout_factor = 1;
+                       if (adapter->link_duplex == HALF_DUPLEX) {
+                               switch (adapter->link_speed) {
+                               case SPEED_10:
+                                       netdev->tx_queue_len = 10;
+                                       adapter->tx_timeout_factor = 8;
+                                       break;
+                               case SPEED_100:
+                                       netdev->tx_queue_len = 100;
+                                       break;
+                               }
+                       }
+
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                        mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
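
With the link renegotiated, the watchdog restores the full tx_queue_len and then clamps it for slow half-duplex links so the qdisc cannot sit on seconds worth of frames: at 10 Mb/s half duplex the queue drops to 10 packets and tx_timeout_factor becomes 8, so the transmit-hang check later in this patch only fires after roughly 8 * HZ jiffies (about 8 seconds assuming HZ=1000); at 100 Mb/s half duplex the queue is 100 packets with the default 1 * HZ window.
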
@@ -2263,7 +2418,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
        e1000_update_adaptive(&adapter->hw);
 
-       if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+#ifdef CONFIG_E1000_MQ
+       txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#endif
+       if (!netif_carrier_ok(netdev)) {
                if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
@@ -2314,6 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 {
 #ifdef NETIF_F_TSO
        struct e1000_context_desc *context_desc;
+       struct e1000_buffer *buffer_info;
        unsigned int i;
        uint32_t cmd_length = 0;
        uint16_t ipcse = 0, tucse, mss;
@@ -2363,6 +2522,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
                i = tx_ring->next_to_use;
                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+               buffer_info = &tx_ring->buffer_info[i];
 
                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
@@ -2374,14 +2534,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
+               buffer_info->time_stamp = jiffies;
+
                if (++i == tx_ring->count) i = 0;
                tx_ring->next_to_use = i;
 
-               return 1;
+               return TRUE;
        }
 #endif
 
-       return 0;
+       return FALSE;
 }
 
 static inline boolean_t
@@ -2389,6 +2551,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               struct sk_buff *skb)
 {
        struct e1000_context_desc *context_desc;
+       struct e1000_buffer *buffer_info;
        unsigned int i;
        uint8_t css;
 
@@ -2396,6 +2559,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                css = skb->h.raw - skb->data;
 
                i = tx_ring->next_to_use;
+               buffer_info = &tx_ring->buffer_info[i];
                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
                context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2568,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                context_desc->tcp_seg_setup.data = 0;
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
+               buffer_info->time_stamp = jiffies;
+
                if (unlikely(++i == tx_ring->count)) i = 0;
                tx_ring->next_to_use = i;
 
@@ -2688,11 +2854,30 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         * overrun the FIFO, adjust the max buffer len if mss
         * drops. */
        if(mss) {
+               uint8_t hdr_len;
                max_per_txd = min(mss << 2, max_per_txd);
                max_txd_pwr = fls(max_per_txd) - 1;
+
+       /* TSO Workaround for 82571/2 Controllers -- if skb->data
+        * points to just header, pull a few bytes of payload from
+        * frags into skb->data */
+               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
+                       (adapter->hw.mac_type == e1000_82571 ||
+                       adapter->hw.mac_type == e1000_82572)) {
+                       unsigned int pull_size;
+                       pull_size = min((unsigned int)4, skb->data_len);
+                       if (!__pskb_pull_tail(skb, pull_size)) {
+                               printk(KERN_ERR "__pskb_pull_tail failed.\n");
+                               dev_kfree_skb_any(skb);
+                               return -EFAULT;
+                       }
+                       len = skb->len - skb->data_len;
+               }
        }
 
        if((mss) || (skb->ip_summed == CHECKSUM_HW))
+       /* reserve a descriptor for the offload context */
                count++;
        count++;
 #else
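
hdr_len here is the offset of the TCP header within the linear data (skb->h.raw - skb->data, i.e. the L2+L3 headers) plus the TCP data offset converted from 32-bit words to bytes (doff << 2); when that equals skb->len - skb->data_len the linear area holds nothing but headers, and the 82571/82572 workaround pulls a few payload bytes in from the fragments. A standalone illustration of the test with made-up frame sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned l2_l3   = 14 + 20;              /* Ethernet + IPv4, no options */
            unsigned doff    = 8;                    /* TCP data offset in 32-bit words */
            unsigned hdr_len = l2_l3 + (doff << 2);  /* 66 bytes of headers */

            unsigned skb_len = 1500, data_len = 1434;   /* 66-byte linear part + frags */
            int header_only  = data_len && hdr_len == skb_len - data_len;

            printf("hdr_len=%u, workaround needed: %s\n", hdr_len,
                   header_only ? "yes" : "no");
            return 0;
    }
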
@@ -2726,27 +2911,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        if(adapter->pcix_82544)
                count += nr_frags;
 
-#ifdef NETIF_F_TSO
-       /* TSO Workaround for 82571/2 Controllers -- if skb->data
-        * points to just header, pull a few bytes of payload from 
-        * frags into skb->data */
-       if (skb_shinfo(skb)->tso_size) {
-               uint8_t hdr_len;
-               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-               if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) && 
-                       (adapter->hw.mac_type == e1000_82571 ||
-                       adapter->hw.mac_type == e1000_82572)) {
-                       unsigned int pull_size;
-                       pull_size = min((unsigned int)4, skb->data_len);
-                       if (!__pskb_pull_tail(skb, pull_size)) {
-                               printk(KERN_ERR "__pskb_pull_tail failed.\n");
-                               dev_kfree_skb_any(skb);
-                               return -EFAULT;
-                       }
-               }
-       }
-#endif
-
        if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
                e1000_transfer_dhcp_info(adapter, skb);
 
@@ -2833,6 +2997,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
+       adapter->tx_timeout_count++;
        e1000_down(adapter);
        e1000_up(adapter);
 }
@@ -2850,7 +3015,7 @@ e1000_get_stats(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       e1000_update_stats(adapter);
+       /* only return the current stats */
        return &adapter->net_stats;
 }
 
@@ -2871,50 +3036,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
        if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
                (max_frame > MAX_JUMBO_FRAME_SIZE)) {
                        DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
-                       return -EINVAL;
-       }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-       /* might want this to be bigger enum check... */
-       /* 82571 controllers limit jumbo frame size to 10500 bytes */
-       if ((adapter->hw.mac_type == e1000_82571 || 
-            adapter->hw.mac_type == e1000_82572) &&
-           max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-               DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
-                                   "on 82571 and 82572 controllers.\n");
                return -EINVAL;
        }
 
-       if(adapter->hw.mac_type == e1000_82573 &&
-           max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
-               DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                   "on 82573\n");
-               return -EINVAL;
-       }
-
-       if(adapter->hw.mac_type > e1000_82547_rev_2) {
-               adapter->rx_buffer_len = max_frame;
-               E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-       } else {
-               if(unlikely((adapter->hw.mac_type < e1000_82543) &&
-                  (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
-                       DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                           "on 82542\n");
+       /* Adapter-specific max frame size limits. */
+       switch (adapter->hw.mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82573:
+               if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+                       DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+                       return -EINVAL;
+               }
+               break;
+       case e1000_82571:
+       case e1000_82572:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+               if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+                       DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
                        return -EINVAL;
-
-               } else {
-                       if(max_frame <= E1000_RXBUFFER_2048) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-                       } else if(max_frame <= E1000_RXBUFFER_4096) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-                       } else if(max_frame <= E1000_RXBUFFER_8192) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-                       } else if(max_frame <= E1000_RXBUFFER_16384) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_16384;
-                       }
                }
+               break;
+       default:
+               /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+               break;
        }
 
+       /* since the driver code now supports splitting a packet across
+        * multiple descriptors, most of the fifo related limitations on
+        * jumbo frame traffic have gone away.
+        * simply use 2k descriptors for everything.
+        *
+        * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+        * means we reserve 2 more, this pushes us to allocate from the next
+        * larger slab size
+        * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+       /* recent hardware supports 1KB granularity */
+       if (adapter->hw.mac_type > e1000_82547_rev_2) {
+               adapter->rx_buffer_len =
+                   ((max_frame < E1000_RXBUFFER_2048) ?
+                       max_frame : E1000_RXBUFFER_2048);
+               E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
+       } else
+               adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+
        netdev->mtu = new_mtu;
 
        if(netif_running(netdev)) {
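
Since the receive path can now split a frame across several 2 KB buffers, rx_buffer_len no longer has to track the MTU: on post-82547 MACs it is capped at E1000_RXBUFFER_2048 and rounded up to the controller's 1 KB granularity, while older MACs always use the fixed 2 KB buffer. E1000_ROUNDUP is assumed below to round its first argument up to a multiple of the second, which is what its use here implies; a worked check with the standard 1500-byte MTU:

    #include <stdio.h>

    /* Assumed equivalent of E1000_ROUNDUP(i, size): round i up to a multiple of size. */
    #define ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

    int main(void)
    {
            unsigned mtu = 1500;
            unsigned max_frame = mtu + 14 + 4;                /* Ethernet header + FCS */
            unsigned len = max_frame < 2048 ? max_frame : 2048;

            ROUNDUP(len, 1024);
            printf("mtu=%u -> rx_buffer_len=%u\n", mtu, len); /* 1500 -> 2048 */
            return 0;
    }
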
@@ -3037,12 +3203,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
        adapter->net_stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
-               adapter->stats.rlec + adapter->stats.mpc + 
-               adapter->stats.cexterr;
+               adapter->stats.rlec + adapter->stats.cexterr;
+       adapter->net_stats.rx_dropped = 0;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
        adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-       adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
        adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
 
        /* Tx Errors */
@@ -3110,12 +3275,24 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        uint32_t icr = E1000_READ_REG(hw, ICR);
-#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+#ifndef CONFIG_E1000_NAPI
        int i;
+#else
+       /* Interrupt Auto-Mask...upon reading ICR,
+        * interrupts are masked.  No need for the
+        * IMC write, but it does mean we should
+        * account for it ASAP. */
+       if (likely(hw->mac_type >= e1000_82571))
+               atomic_inc(&adapter->irq_sem);
 #endif
 
-       if(unlikely(!icr))
+       if (unlikely(!icr)) {
+#ifdef CONFIG_E1000_NAPI
+               if (hw->mac_type >= e1000_82571)
+                       e1000_irq_enable(adapter);
+#endif
                return IRQ_NONE;  /* Not our interrupt */
+       }
 
        if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
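
On 82571 and newer with NAPI, reading ICR auto-masks the interrupt, so the handler only bumps irq_sem instead of writing IMC; the line stays masked until the matching e1000_irq_enable() at the end of the poll brings the counter back to zero. Those helpers are not part of this hunk, so the model below is a userspace toy with assumed decrement-and-test semantics, not the driver's code:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int irq_sem = 1;   /* starts masked, as e1000_sw_init() sets it */
    static int line_enabled;

    static void irq_disable(void)    /* explicit IMC write, or the implicit auto-mask */
    {
            atomic_fetch_add(&irq_sem, 1);
            line_enabled = 0;
    }

    static void irq_enable(void)     /* re-enable only when every disable is paired */
    {
            if (atomic_fetch_sub(&irq_sem, 1) == 1)
                    line_enabled = 1;
    }

    int main(void)
    {
            irq_enable();            /* e1000_up() */
            irq_disable();           /* ICR read auto-masked the line */
            irq_enable();            /* end of the NAPI poll */
            printf("enabled=%d sem=%d\n", line_enabled, atomic_load(&irq_sem)); /* 1 0 */
            return 0;
    }
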
@@ -3123,19 +3300,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
        }
 
 #ifdef CONFIG_E1000_NAPI
-       atomic_inc(&adapter->irq_sem);
-       E1000_WRITE_REG(hw, IMC, ~0);
-       E1000_WRITE_FLUSH(hw);
+       if (unlikely(hw->mac_type < e1000_82571)) {
+               atomic_inc(&adapter->irq_sem);
+               E1000_WRITE_REG(hw, IMC, ~0);
+               E1000_WRITE_FLUSH(hw);
+       }
 #ifdef CONFIG_E1000_MQ
        if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
-               cpu_set(adapter->cpu_for_queue[0],
-                       adapter->rx_sched_call_data.cpumask);
-               for (i = 1; i < adapter->num_queues; i++) {
-                       cpu_set(adapter->cpu_for_queue[i],
-                               adapter->rx_sched_call_data.cpumask);
-                       atomic_inc(&adapter->irq_sem);
-               }
-               atomic_set(&adapter->rx_sched_call_data.count, i);
+               /* We must setup the cpumask once count == 0 since
+                * each cpu bit is cleared when the work is done. */
+               adapter->rx_sched_call_data.cpumask = adapter->cpumask;
+               atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
+               atomic_set(&adapter->rx_sched_call_data.count,
+                          adapter->num_rx_queues);
                smp_call_async_mask(&adapter->rx_sched_call_data);
        } else {
                printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
@@ -3187,7 +3364,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
        struct e1000_adapter *adapter;
        int work_to_do = min(*budget, poll_dev->quota);
-       int tx_cleaned, i = 0, work_done = 0;
+       int tx_cleaned = 0, i = 0, work_done = 0;
 
        /* Must NOT use netdev_priv macro here. */
        adapter = poll_dev->priv;
@@ -3198,11 +3375,23 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
        while (poll_dev != &adapter->polling_netdev[i]) {
                i++;
-               if (unlikely(i == adapter->num_queues))
+               if (unlikely(i == adapter->num_rx_queues))
                        BUG();
        }
 
-       tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+       if (likely(adapter->num_tx_queues == 1)) {
+               /* e1000_clean is called per-cpu.  This lock protects
+                * tx_ring[0] from being cleaned by multiple cpus
+                * simultaneously.  A failure obtaining the lock means
+                * tx_ring[0] is currently being cleaned anyway. */
+               if (spin_trylock(&adapter->tx_queue_lock)) {
+                       tx_cleaned = e1000_clean_tx_irq(adapter,
+                                                       &adapter->tx_ring[0]);
+                       spin_unlock(&adapter->tx_queue_lock);
+               }
+       } else
+               tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+
        adapter->clean_rx(adapter, &adapter->rx_ring[i],
                          &work_done, work_to_do);
 
@@ -3247,17 +3436,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        buffer_info = &tx_ring->buffer_info[i];
                        cleaned = (i == eop);
 
+#ifdef CONFIG_E1000_MQ
+                       tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-
-                       tx_desc->buffer_addr = 0;
-                       tx_desc->lower.data = 0;
-                       tx_desc->upper.data = 0;
+                       memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
                        if(unlikely(++i == tx_ring->count)) i = 0;
                }
 
-               tx_ring->pkt++;
-               
+#ifdef CONFIG_E1000_MQ
+               tx_ring->tx_stats.packets++;
+#endif
+
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
@@ -3276,32 +3467,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                adapter->detect_tx_hung = FALSE;
-               if (tx_ring->buffer_info[i].dma &&
-                   time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+               if (tx_ring->buffer_info[eop].dma &&
+                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+                              adapter->tx_timeout_factor * HZ)
                    && !(E1000_READ_REG(&adapter->hw, STATUS) &
-                       E1000_STATUS_TXOFF)) {
+                        E1000_STATUS_TXOFF)) {
 
                        /* detected Tx unit hang */
-                       i = tx_ring->next_to_clean;
-                       eop = tx_ring->buffer_info[i].next_to_watch;
-                       eop_desc = E1000_TX_DESC(*tx_ring, eop);
                        DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+                                       "  Tx Queue             <%lu>\n"
                                        "  TDH                  <%x>\n"
                                        "  TDT                  <%x>\n"
                                        "  next_to_use          <%x>\n"
                                        "  next_to_clean        <%x>\n"
                                        "buffer_info[next_to_clean]\n"
-                                       "  dma                  <%llx>\n"
                                        "  time_stamp           <%lx>\n"
                                        "  next_to_watch        <%x>\n"
                                        "  jiffies              <%lx>\n"
                                        "  next_to_watch.status <%x>\n",
+                               (unsigned long)((tx_ring - adapter->tx_ring) /
+                                       sizeof(struct e1000_tx_ring)),
                                readl(adapter->hw.hw_addr + tx_ring->tdh),
                                readl(adapter->hw.hw_addr + tx_ring->tdt),
                                tx_ring->next_to_use,
-                               i,
-                               (unsigned long long)tx_ring->buffer_info[i].dma,
-                               tx_ring->buffer_info[i].time_stamp,
+                               tx_ring->next_to_clean,
+                               tx_ring->buffer_info[eop].time_stamp,
                                eop,
                                jiffies,
                                eop_desc->upper.fields.status);
@@ -3386,20 +3576,23 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
        uint32_t length;
        uint8_t last_byte;
        unsigned int i;
-       boolean_t cleaned = FALSE;
+       int cleaned_count = 0;
+       boolean_t cleaned = FALSE, multi_descriptor = FALSE;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);
 
        while(rx_desc->status & E1000_RXD_STAT_DD) {
                buffer_info = &rx_ring->buffer_info[i];
+               u8 status;
 #ifdef CONFIG_E1000_NAPI
                if(*work_done >= work_to_do)
                        break;
                (*work_done)++;
 #endif
+               status = rx_desc->status;
                cleaned = TRUE;
-
+               cleaned_count++;
                pci_unmap_single(pdev,
                                 buffer_info->dma,
                                 buffer_info->length,
@@ -3433,18 +3626,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                        }
                }
 
-               /* Good Receive */
-               skb_put(skb, length - ETHERNET_FCS_SIZE);
+               /* code added for copybreak, this should improve
+                * performance for small packets with large amounts
+                * of reassembly being done in the stack */
+#define E1000_CB_LENGTH 256
+               if ((length < E1000_CB_LENGTH) &&
+                  !rx_ring->rx_skb_top &&
+                  /* or maybe (status & E1000_RXD_STAT_EOP) && */
+                  !multi_descriptor) {
+                       struct sk_buff *new_skb =
+                           dev_alloc_skb(length + NET_IP_ALIGN);
+                       if (new_skb) {
+                               skb_reserve(new_skb, NET_IP_ALIGN);
+                               new_skb->dev = netdev;
+                               memcpy(new_skb->data - NET_IP_ALIGN,
+                                      skb->data - NET_IP_ALIGN,
+                                      length + NET_IP_ALIGN);
+                               /* save the skb in buffer_info as good */
+                               buffer_info->skb = skb;
+                               skb = new_skb;
+                               skb_put(skb, length);
+                       }
+               }
+
+               /* end copybreak code */
 
                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter,
-                                 (uint32_t)(rx_desc->status) |
+                                 (uint32_t)(status) |
                                  ((uint32_t)(rx_desc->errors) << 24),
                                  rx_desc->csum, skb);
                skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
                if(unlikely(adapter->vlgrp &&
-                           (rx_desc->status & E1000_RXD_STAT_VP))) {
+                           (status & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                                                 le16_to_cpu(rx_desc->special) &
                                                 E1000_RXD_SPC_VLAN_MASK);
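
The copybreak branch trades one memcpy for cheaper buffer management: frames shorter than E1000_CB_LENGTH that fit a single descriptor are copied into a right-sized skb, while the original full-size receive buffer is parked in buffer_info so the replenish path added later in this patch (the skb_trim()/goto map_skb branch) can hand it straight back to the hardware. A userspace sketch of the decision, with invented names and the alignment headroom left out for simplicity:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    #define CB_LENGTH 256     /* stands in for E1000_CB_LENGTH */

    /* Returns the buffer to pass up the stack; *recycle tells the caller whether
     * the original DMA buffer may be re-posted to the ring unchanged. */
    static unsigned char *maybe_copybreak(unsigned char *rx_buf, size_t len,
                                          bool single_desc, bool *recycle)
    {
            *recycle = false;
            if (len < CB_LENGTH && single_desc) {
                    unsigned char *small = malloc(len);
                    if (small) {
                            memcpy(small, rx_buf, len);  /* small copy, big buffer recycled */
                            *recycle = true;
                            return small;
                    }
            }
            return rx_buf;                               /* pass the original buffer up */
    }
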
@@ -3462,17 +3677,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                }
 #endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;
-               rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+               rx_ring->rx_stats.packets++;
+               rx_ring->rx_stats.bytes += length;
+#endif
 
 next_desc:
                rx_desc->status = 0;
-               buffer_info->skb = NULL;
-               if(unlikely(++i == rx_ring->count)) i = 0;
 
-               rx_desc = E1000_RX_DESC(*rx_ring, i);
+               /* return some buffers to hardware, one at a time is too slow */
+               if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                       adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
        }
        rx_ring->next_to_clean = i;
-       adapter->alloc_rx_buf(adapter, rx_ring);
+
+       cleaned_count = E1000_DESC_UNUSED(rx_ring);
+       if (cleaned_count)
+               adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
        return cleaned;
 }
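
Replenishing in batches of E1000_RX_BUFFER_WRITE keeps the receive tail register from being written on every frame (each write is an MMIO access), and the E1000_DESC_UNUSED() top-up after the loop posts whatever the last partial batch left over. The batching pattern itself, reduced to a standalone toy (the batch size is illustrative):

    #include <stdio.h>

    #define BATCH 16          /* stands in for E1000_RX_BUFFER_WRITE */

    static void replenish(unsigned n)
    {
            printf("post %u buffers with one tail write\n", n);
    }

    int main(void)
    {
            unsigned cleaned = 0;

            for (unsigned i = 0; i < 40; i++) {      /* pretend 40 frames were cleaned */
                    if (++cleaned >= BATCH) {
                            replenish(cleaned);
                            cleaned = 0;
                    }
            }
            if (cleaned)
                    replenish(cleaned);              /* final partial batch */
            return 0;
    }
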
@@ -3501,6 +3725,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
        struct sk_buff *skb;
        unsigned int i, j;
        uint32_t length, staterr;
+       int cleaned_count = 0;
        boolean_t cleaned = FALSE;
 
        i = rx_ring->next_to_clean;
@@ -3517,6 +3742,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                (*work_done)++;
 #endif
                cleaned = TRUE;
+               cleaned_count++;
                pci_unmap_single(pdev, buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);
@@ -3593,18 +3819,28 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                }
 #endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;
-               rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+               rx_ring->rx_stats.packets++;
+               rx_ring->rx_stats.bytes += length;
+#endif
 
 next_desc:
                rx_desc->wb.middle.status_error &= ~0xFF;
                buffer_info->skb = NULL;
-               if(unlikely(++i == rx_ring->count)) i = 0;
 
-               rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+               /* return some buffers to hardware, one at a time is too slow */
+               if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                       adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
                staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
        }
        rx_ring->next_to_clean = i;
-       adapter->alloc_rx_buf(adapter, rx_ring);
+
+       cleaned_count = E1000_DESC_UNUSED(rx_ring);
+       if (cleaned_count)
+               adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
        return cleaned;
 }
@@ -3616,7 +3852,8 @@ next_desc:
 
 static void
 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                       struct e1000_rx_ring *rx_ring)
+                       struct e1000_rx_ring *rx_ring,
+                      int cleaned_count)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3866,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
 
-       while(!buffer_info->skb) {
-               skb = dev_alloc_skb(bufsz);
+       while (cleaned_count--) {
+               if (!(skb = buffer_info->skb))
+                       skb = dev_alloc_skb(bufsz);
+               else {
+                       skb_trim(skb, 0);
+                       goto map_skb;
+               }
+
 
                if(unlikely(!skb)) {
                        /* Better luck next round */
+                       adapter->alloc_rx_buff_failed++;
                        break;
                }
 
@@ -3670,6 +3914,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
+map_skb:
                buffer_info->dma = pci_map_single(pdev,
                                                  skb->data,
                                                  adapter->rx_buffer_len,
@@ -3718,7 +3963,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 static void
 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                          struct e1000_rx_ring *rx_ring)
+                          struct e1000_rx_ring *rx_ring,
+                         int cleaned_count)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -3734,7 +3980,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
        ps_page = &rx_ring->ps_page[i];
        ps_page_dma = &rx_ring->ps_page_dma[i];
 
-       while(!buffer_info->skb) {
+       while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
                for(j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -4106,8 +4352,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 
        if((adapter->hw.mng_cookie.status &
                E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-               (vid == adapter->mng_vlan_id))
+           (vid == adapter->mng_vlan_id)) {
+               /* release control to f/w */
+               e1000_release_hw_control(adapter);
                return;
+       }
+
        /* remove VID from filter table */
        index = (vid >> 5) & 0x7F;
        vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -4173,8 +4423,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
+       uint32_t ctrl, ctrl_ext, rctl, manc, status;
        uint32_t wufc = adapter->wol;
+       int retval = 0;
 
        netif_device_detach(netdev);
 
@@ -4220,13 +4471,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 
                E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
-               pci_enable_wake(pdev, 3, 1);
-               pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+               retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+               retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
        } else {
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
                E1000_WRITE_REG(&adapter->hw, WUFC, 0);
-               pci_enable_wake(pdev, 3, 0);
-               pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+               retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+               retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
        }
 
        pci_save_state(pdev);
@@ -4237,29 +4496,24 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
                if(manc & E1000_MANC_SMBUS_EN) {
                        manc |= E1000_MANC_ARP_EN;
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
-                       pci_enable_wake(pdev, 3, 1);
-                       pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+                       retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+                       if (retval)
+                               DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+                       retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+                       if (retval)
+                               DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
                }
        }
 
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm & ~E1000_SWSM_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
+       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
+        * would have already happened in close and is redundant. */
+       e1000_release_hw_control(adapter);
 
        pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error in setting power state\n");
 
        return 0;
 }
@@ -4269,16 +4523,21 @@ e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t manc, ret_val, swsm;
-       uint32_t ctrl_ext;
+       int retval;
+       uint32_t manc, ret_val;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
+       retval = pci_set_power_state(pdev, PCI_D0);
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error in setting power state\n");
        ret_val = pci_enable_device(pdev);
        pci_set_master(pdev);
 
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
+       retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+       retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
 
        e1000_reset(adapter);
        E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4295,21 +4554,13 @@ e1000_resume(struct pci_dev *pdev)
                E1000_WRITE_REG(&adapter->hw, MANC, manc);
        }
 
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm | E1000_SWSM_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
+       /* If the controller is 82573 and f/w is AMT, do not set
+        * DRV_LOAD until the interface is up.  For all other cases,
+        * let the f/w know that the h/w is now under the control
+        * of the driver. */
+       if (adapter->hw.mac_type != e1000_82573 ||
+           !e1000_check_mng_mode(&adapter->hw))
+               e1000_get_hw_control(adapter);
 
        return 0;
 }
@@ -4327,6 +4578,9 @@ e1000_netpoll(struct net_device *netdev)
        disable_irq(adapter->pdev->irq);
        e1000_intr(adapter->pdev->irq, netdev, NULL);
        e1000_clean_tx_irq(adapter, adapter->tx_ring);
+#ifndef CONFIG_E1000_NAPI
+       adapter->clean_rx(adapter, adapter->rx_ring);
+#endif
        enable_irq(adapter->pdev->irq);
 }
 #endif
index ccbbe5ad8e0fb230cd8477fe4df0b2907cea4756..0a7918c625574ed3ce08cee763929135665d10a8 100644 (file)
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
  *
  * Valid Range: 100-100000 (0=off, 1=dynamic)
  *
- * Default Value: 1
+ * Default Value: 8000
  */
 
 E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -320,7 +320,7 @@ e1000_check_options(struct e1000_adapter *adapter)
                } else {
                        tx_ring->count = opt.def;
                }
-               for (i = 0; i < adapter->num_queues; i++)
+               for (i = 0; i < adapter->num_tx_queues; i++)
                        tx_ring[i].count = tx_ring->count;
        }
        { /* Receive Descriptor Count */
@@ -346,7 +346,7 @@ e1000_check_options(struct e1000_adapter *adapter)
                } else {
                        rx_ring->count = opt.def;
                }
-               for (i = 0; i < adapter->num_queues; i++)
+               for (i = 0; i < adapter->num_rx_queues; i++)
                        rx_ring[i].count = rx_ring->count;
        }
        { /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
                        e1000_validate_option(&fc, &opt, adapter);
                        adapter->hw.fc = adapter->hw.original_fc = fc;
                } else {
-                       adapter->hw.fc = opt.def;
+                       adapter->hw.fc = adapter->hw.original_fc = opt.def;
                }
        }
        { /* Transmit Interrupt Delay */
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
                                         .p = dplx_list }}
                };
 
+               if (e1000_check_phy_reset_block(&adapter->hw)) {
+                       DPRINTK(PROBE, INFO,
+                               "Link active due to SoL/IDER Session. "
+                               "Speed/Duplex/AutoNeg parameter ignored.\n");
+                       return;
+               }
                if (num_Duplex > bd) {
                        dplx = Duplex[bd];
                        e1000_validate_option(&dplx, &opt, adapter);
index 22c3a37bba5a3eaed8e8117e7045efe5257bcfa4..40ae36b20c9dea2811491e93e4de9ceb9d0c3ef5 100644 (file)
@@ -35,6 +35,8 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
 
 #include <linux/bitops.h>
 #include <linux/delay.h>
 /* Constants */
 #define VLAN_HLEN              4
 #define FCS_LEN                        4
-#define WRAP                   NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
+#define DMA_ALIGN              8       /* hw requires 8-byte alignment */
+#define HW_IP_ALIGN            2       /* hw aligns IP header */
+#define WRAP                   HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
 #define RX_SKB_SIZE            ((dev->mtu + WRAP + 7) & ~0x7)
 
-#define INT_CAUSE_UNMASK_ALL           0x0007ffff
-#define INT_CAUSE_UNMASK_ALL_EXT       0x0011ffff
-#define INT_CAUSE_MASK_ALL             0x00000000
-#define INT_CAUSE_MASK_ALL_EXT         0x00000000
+#define INT_UNMASK_ALL                 0x0007ffff
+#define INT_UNMASK_ALL_EXT             0x0011ffff
+#define INT_MASK_ALL                   0x00000000
+#define INT_MASK_ALL_EXT               0x00000000
 #define INT_CAUSE_CHECK_BITS           INT_CAUSE_UNMASK_ALL
 #define INT_CAUSE_CHECK_BITS_EXT       INT_CAUSE_UNMASK_ALL_EXT
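To make the buffer sizing above concrete, using the constants visible in this hunk (HW_IP_ALIGN = 2, VLAN_HLEN = 4, FCS_LEN = 4, plus the standard ETH_HLEN of 14, so WRAP = 24) and an illustrative MTU of 1500:

        unsigned int mtu = 1500;                                            /* example value */
        unsigned int wrap = HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN;   /* 24 */
        unsigned int rx_skb_size = (mtu + wrap + 7) & ~0x7;                 /* 1528, rounded to 8 bytes */

The allocation in mv643xx_eth_rx_task() further down then asks for RX_SKB_SIZE + DMA_ALIGN bytes so the start of the buffer can be pushed up to the next 8-byte boundary.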
 
@@ -78,8 +82,9 @@
 static int eth_port_link_is_up(unsigned int eth_port_num);
 static void eth_port_uc_addr_get(struct net_device *dev,
                                                unsigned char *MacAddr);
-static int mv643xx_eth_real_open(struct net_device *);
-static int mv643xx_eth_real_stop(struct net_device *);
+static void eth_port_set_multicast_list(struct net_device *);
+static int mv643xx_eth_open(struct net_device *);
+static int mv643xx_eth_stop(struct net_device *);
 static int mv643xx_eth_change_mtu(struct net_device *, int);
 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
 static void eth_port_init_mac_tables(unsigned int eth_port_num);
@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
  */
 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct mv643xx_private *mp = netdev_priv(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&mp->lock, flags);
-
-       if ((new_mtu > 9500) || (new_mtu < 64)) {
-               spin_unlock_irqrestore(&mp->lock, flags);
+       if ((new_mtu > 9500) || (new_mtu < 64))
                return -EINVAL;
-       }
 
        dev->mtu = new_mtu;
        /*
@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
         * to memory is full, which might fail the open function.
         */
        if (netif_running(dev)) {
-               if (mv643xx_eth_real_stop(dev))
-                       printk(KERN_ERR
-                               "%s: Fatal error on stopping device\n",
-                               dev->name);
-               if (mv643xx_eth_real_open(dev))
+               mv643xx_eth_stop(dev);
+               if (mv643xx_eth_open(dev))
                        printk(KERN_ERR
                                "%s: Fatal error on opening device\n",
                                dev->name);
        }
 
-       spin_unlock_irqrestore(&mp->lock, flags);
        return 0;
 }
 
@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
        struct mv643xx_private *mp = netdev_priv(dev);
        struct pkt_info pkt_info;
        struct sk_buff *skb;
+       int unaligned;
 
        if (test_and_set_bit(0, &mp->rx_task_busy))
                panic("%s: Error in test_set_bit / clear_bit", dev->name);
 
        while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
-               skb = dev_alloc_skb(RX_SKB_SIZE);
+               skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
                if (!skb)
                        break;
                mp->rx_ring_skbs++;
+               unaligned = (u32)skb->data & (DMA_ALIGN - 1);
+               if (unaligned)
+                       skb_reserve(skb, DMA_ALIGN - unaligned);
                pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
                pkt_info.byte_cnt = RX_SKB_SIZE;
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
                                "%s: Error allocating RX Ring\n", dev->name);
                        break;
                }
-               skb_reserve(skb, 2);
+               skb_reserve(skb, HW_IP_ALIGN);
        }
        clear_bit(0, &mp->rx_task_busy);
        /*
@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
        else {
                /* Return interrupts */
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
-                                                       INT_CAUSE_UNMASK_ALL);
+                                                       INT_UNMASK_ALL);
        }
 #endif
 }
@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
                mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
 
        mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
+
+       eth_port_set_multicast_list(dev);
 }
 
 /*
@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
        if (!(eth_int_cause_ext & (BIT0 | BIT8)))
                return released;
 
-       spin_lock(&mp->lock);
-
        /* Check only queue 0 */
        while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
                if (pkt_info.cmd_sts & BIT0) {
@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
                        stats->tx_errors++;
                }
 
-               /*
-                * If return_info is different than 0, release the skb.
-                * The case where return_info is not 0 is only in case
-                * when transmitted a scatter/gather packet, where only
-                * last skb releases the whole chain.
-                */
-               if (pkt_info.return_info) {
-                       if (skb_shinfo(pkt_info.return_info)->nr_frags)
-                               dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
-                       else
-                               dma_unmap_single(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
+               if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
+                       dma_unmap_single(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
 
+               if (pkt_info.return_info) {
                        dev_kfree_skb_irq(pkt_info.return_info);
                        released = 0;
-               } else
-                       dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                       pkt_info.byte_cnt, DMA_TO_DEVICE);
+               }
        }
 
-       spin_unlock(&mp->lock);
-
        return released;
 }
 
@@ -482,12 +470,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 
        /* Read interrupt cause registers */
        eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
-                                               INT_CAUSE_UNMASK_ALL;
+                                               INT_UNMASK_ALL;
 
        if (eth_int_cause & BIT1)
                eth_int_cause_ext = mv_read(
                        MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
-                                               INT_CAUSE_UNMASK_ALL_EXT;
+                                               INT_UNMASK_ALL_EXT;
 
 #ifdef MV643XX_NAPI
        if (!(eth_int_cause & 0x0007fffd)) {
@@ -512,9 +500,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
        } else {
                if (netif_rx_schedule_prep(dev)) {
                        /* Mask all the interrupts */
-                       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-                       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
-                                                               (port_num), 0);
+                       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+                                                               INT_MASK_ALL);
+                       /* wait for previous write to complete */
+                       mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
                        __netif_rx_schedule(dev);
                }
 #else
@@ -527,9 +516,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
                 * with skb's.
                 */
 #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
-               /* Unmask all interrupts on ethernet port */
+               /* Mask all interrupts on ethernet port */
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                                       INT_CAUSE_MASK_ALL);
+                                                       INT_MASK_ALL);
+               /* wait for previous write to take effect */
+               mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
                queue_task(&mp->rx_task, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
 #else
@@ -635,56 +627,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
        return coal;
 }
 
-/*
- * mv643xx_eth_open
- *
- * This function is called when openning the network device. The function
- * should initialize all the hardware, initialize cyclic Rx/Tx
- * descriptors chain and buffers and allocate an IRQ to the network
- * device.
- *
- * Input :     a pointer to the network device structure
- *
- * Output :    zero of success , nonzero if fails.
- */
-
-static int mv643xx_eth_open(struct net_device *dev)
-{
-       struct mv643xx_private *mp = netdev_priv(dev);
-       unsigned int port_num = mp->port_num;
-       int err;
-
-       spin_lock_irq(&mp->lock);
-
-       err = request_irq(dev->irq, mv643xx_eth_int_handler,
-                       SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
-
-       if (err) {
-               printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
-                                                               port_num);
-               err = -EAGAIN;
-               goto out;
-       }
-
-       if (mv643xx_eth_real_open(dev)) {
-               printk("%s: Error opening interface\n", dev->name);
-               err = -EBUSY;
-               goto out_free;
-       }
-
-       spin_unlock_irq(&mp->lock);
-
-       return 0;
-
-out_free:
-       free_irq(dev->irq, dev);
-
-out:
-       spin_unlock_irq(&mp->lock);
-
-       return err;
-}
-
 /*
  * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
  *
@@ -777,28 +719,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
        mp->port_tx_queue_command |= 1;
 }
 
-/* Helper function for mv643xx_eth_open */
-static int mv643xx_eth_real_open(struct net_device *dev)
+/*
+ * mv643xx_eth_open
+ *
+ * This function is called when opening the network device. It
+ * should initialize all the hardware, set up the cyclic Rx/Tx
+ * descriptor chains and buffers, and allocate an IRQ for the network
+ * device.
+ *
+ * Input :     a pointer to the network device structure
+ *
+ * Output :    zero on success, nonzero on failure.
+ */
+
+static int mv643xx_eth_open(struct net_device *dev)
 {
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        unsigned int size;
+       int err;
+
+       err = request_irq(dev->irq, mv643xx_eth_int_handler,
+                       SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+       if (err) {
+               printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
+                                                               port_num);
+               return -EAGAIN;
+       }
 
        /* Stop RX Queues */
        mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
-       /* Clear the ethernet port interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
-       /* Unmask RX buffer and TX end interrupt */
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL);
-
-       /* Unmask phy and link status changes interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL_EXT);
-
        /* Set the MAC Address */
        memcpy(mp->port_mac_addr, dev->dev_addr, 6);
 
@@ -818,14 +769,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
                                                                GFP_KERNEL);
        if (!mp->rx_skb) {
                printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_irq;
        }
        mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
                                                                GFP_KERNEL);
        if (!mp->tx_skb) {
                printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
-               kfree(mp->rx_skb);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_rx_skb;
        }
 
        /* Allocate TX ring */
@@ -845,9 +797,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
        if (!mp->p_tx_desc_area) {
                printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
                                                        dev->name, size);
-               kfree(mp->rx_skb);
-               kfree(mp->tx_skb);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_tx_skb;
        }
        BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
        memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
@@ -874,13 +825,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
                printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
                                                        dev->name);
                if (mp->rx_sram_size)
-                       iounmap(mp->p_rx_desc_area);
+                       iounmap(mp->p_tx_desc_area);
                else
                        dma_free_coherent(NULL, mp->tx_desc_area_size,
                                        mp->p_tx_desc_area, mp->tx_desc_dma);
-               kfree(mp->rx_skb);
-               kfree(mp->tx_skb);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_tx_skb;
        }
        memset((void *)mp->p_rx_desc_area, 0, size);
 
@@ -900,9 +850,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
        mp->tx_int_coal =
                eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
 
-       netif_start_queue(dev);
+       /* Clear any pending ethernet port interrupts */
+       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+       /* Unmask phy and link status changes interrupts */
+       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+                                               INT_UNMASK_ALL_EXT);
 
+       /* Unmask RX buffer and TX end interrupt */
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
        return 0;
+
+out_free_tx_skb:
+       kfree(mp->tx_skb);
+out_free_rx_skb:
+       kfree(mp->rx_skb);
+out_free_irq:
+       free_irq(dev->irq, dev);
+
+       return err;
 }
 
 static void mv643xx_eth_free_tx_rings(struct net_device *dev)
@@ -910,14 +877,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        unsigned int curr;
+       struct sk_buff *skb;
 
        /* Stop Tx Queues */
        mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
        /* Free outstanding skb's on TX rings */
        for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
-               if (mp->tx_skb[curr]) {
-                       dev_kfree_skb(mp->tx_skb[curr]);
+               skb = mp->tx_skb[curr];
+               if (skb) {
+                       mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
+                       dev_kfree_skb(skb);
                        mp->tx_ring_skbs--;
                }
        }
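The extra decrement added above matches how descriptors appear to be counted on the transmit side: a scatter/gather skb takes one descriptor for the linear header plus one per page fragment, and tx_ring_skbs looks to be incremented once per descriptor in eth_port_send(), so freeing the skb has to give back nr_frags + 1 slots. Illustration only:

        unsigned int descs_held = 1 + skb_shinfo(skb)->nr_frags;       /* header + fragments */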
@@ -973,44 +943,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
  * Output :    zero if success , nonzero if fails
  */
 
-/* Helper function for mv643xx_eth_stop */
-
-static int mv643xx_eth_real_stop(struct net_device *dev)
+static int mv643xx_eth_stop(struct net_device *dev)
 {
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
 
+       /* Mask all interrupts on ethernet port */
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+       /* wait for previous write to complete */
+       mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
+#ifdef MV643XX_NAPI
+       netif_poll_disable(dev);
+#endif
        netif_carrier_off(dev);
        netif_stop_queue(dev);
 
-       mv643xx_eth_free_tx_rings(dev);
-       mv643xx_eth_free_rx_rings(dev);
-
        eth_port_reset(mp->port_num);
 
-       /* Disable ethernet port interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
-       /* Mask RX buffer and TX end interrupt */
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-
-       /* Mask phy and link status changes interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
-
-       return 0;
-}
-
-static int mv643xx_eth_stop(struct net_device *dev)
-{
-       struct mv643xx_private *mp = netdev_priv(dev);
-
-       spin_lock_irq(&mp->lock);
+       mv643xx_eth_free_tx_rings(dev);
+       mv643xx_eth_free_rx_rings(dev);
 
-       mv643xx_eth_real_stop(dev);
+#ifdef MV643XX_NAPI
+       netif_poll_enable(dev);
+#endif
 
        free_irq(dev->irq, dev);
-       spin_unlock_irq(&mp->lock);
 
        return 0;
 }
@@ -1022,20 +980,17 @@ static void mv643xx_tx(struct net_device *dev)
        struct pkt_info pkt_info;
 
        while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
-               if (pkt_info.return_info) {
-                       if (skb_shinfo(pkt_info.return_info)->nr_frags)
-                               dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
-                       else
-                               dma_unmap_single(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
+               if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
+                       dma_unmap_single(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
 
+               if (pkt_info.return_info)
                        dev_kfree_skb_irq(pkt_info.return_info);
-               } else
-                       dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                       pkt_info.byte_cnt, DMA_TO_DEVICE);
        }
 
        if (netif_queue_stopped(dev) &&
@@ -1053,14 +1008,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
        struct mv643xx_private *mp = netdev_priv(dev);
        int done = 1, orig_budget, work_done;
        unsigned int port_num = mp->port_num;
-       unsigned long flags;
 
 #ifdef MV643XX_TX_FAST_REFILL
        if (++mp->tx_clean_threshold > 5) {
-               spin_lock_irqsave(&mp->lock, flags);
                mv643xx_tx(dev);
                mp->tx_clean_threshold = 0;
-               spin_unlock_irqrestore(&mp->lock, flags);
        }
 #endif
 
@@ -1078,21 +1030,36 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
        }
 
        if (done) {
-               spin_lock_irqsave(&mp->lock, flags);
-               __netif_rx_complete(dev);
+               netif_rx_complete(dev);
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL);
-               mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL_EXT);
-               spin_unlock_irqrestore(&mp->lock, flags);
+                                               INT_UNMASK_ALL);
        }
 
        return done ? 0 : 1;
 }
 #endif
 
+/* Hardware can't handle unaligned fragments smaller than 9 bytes.
+ * This helper function detects that case.
+ */
+
+static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
+{
+       unsigned int frag;
+       skb_frag_t *fragp;
+
+       for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+               fragp = &skb_shinfo(skb)->frags[frag];
+               if (fragp->size <= 8 && fragp->page_offset & 0x7)
+                       return 1;
+       }
+       return 0;
+}
+
 /*
  * mv643xx_eth_start_xmit
  *
@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return 1;
        }
 
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+       if (has_tiny_unaligned_frags(skb)) {
+               if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
+                       stats->tx_dropped++;
+                       printk(KERN_DEBUG "%s: failed to linearize tiny "
+                                       "unaligned fragment\n", dev->name);
+                       return 1;
+               }
+       }
+
        spin_lock_irqsave(&mp->lock, flags);
 
-       /* Update packet info data structure -- DMA owned, first last */
-#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
        if (!skb_shinfo(skb)->nr_frags) {
-linear:
                if (skb->ip_summed != CHECKSUM_HW) {
                        /* Errata BTS #50, IHL must be 5 if no HW checksum */
                        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
@@ -1150,7 +1124,6 @@ linear:
                                           5 << ETH_TX_IHL_SHIFT;
                        pkt_info.l4i_chk = 0;
                } else {
-
                        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
                                           ETH_TX_FIRST_DESC |
                                           ETH_TX_LAST_DESC |
@@ -1158,14 +1131,16 @@ linear:
                                           ETH_GEN_IP_V_4_CHECKSUM |
                                           skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
                        /* CPU already calculated pseudo header checksum. */
-                       if (skb->nh.iph->protocol == IPPROTO_UDP) {
+                       if ((skb->protocol == ETH_P_IP) &&
+                           (skb->nh.iph->protocol == IPPROTO_UDP) ) {
                                pkt_info.cmd_sts |= ETH_UDP_FRAME;
                                pkt_info.l4i_chk = skb->h.uh->check;
-                       } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+                       } else if ((skb->protocol == ETH_P_IP) &&
+                                  (skb->nh.iph->protocol == IPPROTO_TCP))
                                pkt_info.l4i_chk = skb->h.th->check;
                        else {
                                printk(KERN_ERR
-                                       "%s: chksum proto != TCP or UDP\n",
+                                       "%s: chksum proto != IPv4 TCP or UDP\n",
                                        dev->name);
                                spin_unlock_irqrestore(&mp->lock, flags);
                                return 1;
@@ -1183,26 +1158,6 @@ linear:
        } else {
                unsigned int frag;
 
-               /* Since hardware can't handle unaligned fragments smaller
-                * than 9 bytes, if we find any, we linearize the skb
-                * and start again.  When I've seen it, it's always been
-                * the first frag (probably near the end of the page),
-                * but we check all frags to be safe.
-                */
-               for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
-                       skb_frag_t *fragp;
-
-                       fragp = &skb_shinfo(skb)->frags[frag];
-                       if (fragp->size <= 8 && fragp->page_offset & 0x7) {
-                               skb_linearize(skb, GFP_ATOMIC);
-                               printk(KERN_DEBUG "%s: unaligned tiny fragment"
-                                               "%d of %d, fixed\n",
-                                               dev->name, frag,
-                                               skb_shinfo(skb)->nr_frags);
-                               goto linear;
-                       }
-               }
-
                /* first frag which is skb header */
                pkt_info.byte_cnt = skb_headlen(skb);
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1221,14 +1176,16 @@ linear:
                                           ETH_GEN_IP_V_4_CHECKSUM |
                                           skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
                        /* CPU already calculated pseudo header checksum. */
-                       if (skb->nh.iph->protocol == IPPROTO_UDP) {
+                       if ((skb->protocol == ETH_P_IP) &&
+                           (skb->nh.iph->protocol == IPPROTO_UDP)) {
                                pkt_info.cmd_sts |= ETH_UDP_FRAME;
                                pkt_info.l4i_chk = skb->h.uh->check;
-                       } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+                       } else if ((skb->protocol == ETH_P_IP) &&
+                                  (skb->nh.iph->protocol == IPPROTO_TCP))
                                pkt_info.l4i_chk = skb->h.th->check;
                        else {
                                printk(KERN_ERR
-                                       "%s: chksum proto != TCP or UDP\n",
+                                       "%s: chksum proto != IPv4 TCP or UDP\n",
                                        dev->name);
                                spin_unlock_irqrestore(&mp->lock, flags);
                                return 1;
@@ -1288,6 +1245,8 @@ linear:
                }
        }
 #else
+       spin_lock_irqsave(&mp->lock, flags);
+
        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
                                                        ETH_TX_LAST_DESC;
        pkt_info.l4i_chk = 0;
@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
-{
-       int port_num = mp->port_num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mp->lock, flags);
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                       INT_CAUSE_UNMASK_ALL);
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                       INT_CAUSE_UNMASK_ALL_EXT);
-       spin_unlock_irqrestore(&mp->lock, flags);
-}
-
-static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
-{
-       int port_num = mp->port_num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mp->lock, flags);
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                       INT_CAUSE_MASK_ALL);
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                       INT_CAUSE_MASK_ALL_EXT);
-       spin_unlock_irqrestore(&mp->lock, flags);
-}
-
 static void mv643xx_netpoll(struct net_device *netdev)
 {
        struct mv643xx_private *mp = netdev_priv(netdev);
+       int port_num = mp->port_num;
+
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+       /* wait for previous write to complete */
+       mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
 
-       mv643xx_disable_irq(mp);
        mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
-       mv643xx_enable_irq(mp);
+
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
 }
 #endif
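The write-then-read pairs introduced throughout this patch (an interrupt-mask write immediately followed by a read of the same register, commented as waiting for the write to complete) are the usual way of flushing a posted MMIO write before depending on its effect. If it were worth factoring out, a helper might look like this sketch, which is hypothetical and not part of the patch:

        static inline void mv_write_flush(int offset, u32 data)
        {
                mv_write(offset, data);         /* posted write */
                mv_read(offset);                /* read-back forces it out to the device */
        }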
 
@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
         * Zero copy can only work if we use Discovery II memory. Else, we will
         * have to map the buffers to ISA memory which is only 16 MB
         */
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
 #endif
 #endif
 
@@ -2053,6 +1991,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
        return 1;
 }
 
+/*
+ * The entries in each table are indexed by a hash of a packet's MAC
+ * address.  One bit in each entry determines whether the packet is
+ * accepted.  There are 4 entries (each 8 bits wide) in each register
+ * of the table.  The bits in each entry are defined as follows:
+ *     0       Accept=1, Drop=0
+ *     3-1     Queue                   (ETH_Q0=0)
+ *     7-4     Reserved = 0;
+ */
+static void eth_port_set_filter_table_entry(int table, unsigned char entry)
+{
+       unsigned int table_reg;
+       unsigned int tbl_offset;
+       unsigned int reg_offset;
+
+       tbl_offset = (entry / 4) * 4;   /* Register offset of DA table entry */
+       reg_offset = entry % 4;         /* Entry offset within the register */
+
+       /* Set "accepts frame bit" at specified table entry */
+       table_reg = mv_read(table + tbl_offset);
+       table_reg |= 0x01 << (8 * reg_offset);
+       mv_write(table + tbl_offset, table_reg);
+}
+
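A worked example of the indexing above, with an entry value picked for illustration: entry 0x25 lands in the register at table + 0x24, second 8-bit slot, so the accept bit is bit 8 of that register.

        unsigned char entry = 0x25;                     /* example hash value */
        unsigned int tbl_offset = (entry / 4) * 4;      /* 0x24 */
        unsigned int reg_offset = entry % 4;            /* 1 */
        u32 accept = 0x01 << (8 * reg_offset);          /* 0x00000100 */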
+/*
+ * eth_port_mc_addr - Multicast address settings.
+ *
+ * The MV device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ *    Table entries in the DA-Filter table.
+ * 2) Other Multicast Table for multicast addresses of any other type.
+ *    An 8-bit CRC of the address is used as an index into the Other
+ *    Multicast Table entries in the DA-Filter table.  This function
+ *    calculates that CRC-8 value.
+ * In either case, eth_port_set_filter_table_entry() is then called
+ * to set the actual table entry.
+ */
+static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
+{
+       unsigned int mac_h;
+       unsigned int mac_l;
+       unsigned char crc_result = 0;
+       int table;
+       int mac_array[48];
+       int crc[8];
+       int i;
+
+       if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
+           (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
+               table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                                       (eth_port_num);
+               eth_port_set_filter_table_entry(table, p_addr[5]);
+               return;
+       }
+
+       /* Calculate CRC-8 out of the given address */
+       mac_h = (p_addr[0] << 8) | (p_addr[1]);
+       mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
+                       (p_addr[4] << 8) | (p_addr[5] << 0);
+
+       for (i = 0; i < 32; i++)
+               mac_array[i] = (mac_l >> i) & 0x1;
+       for (i = 32; i < 48; i++)
+               mac_array[i] = (mac_h >> (i - 32)) & 0x1;
+
+       crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
+                mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
+                mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
+                mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
+                mac_array[8]  ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[0];
+
+       crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+                mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
+                mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
+                mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
+                mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
+                mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
+                mac_array[9]  ^ mac_array[6]  ^ mac_array[1]  ^ mac_array[0];
+
+       crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
+                mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
+                mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
+                mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
+                mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^
+                mac_array[6]  ^ mac_array[2]  ^ mac_array[1]  ^ mac_array[0];
+
+       crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+                mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
+                mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
+                mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
+                mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[7]  ^
+                mac_array[3]  ^ mac_array[2]  ^ mac_array[1];
+
+       crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
+                mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
+                mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
+                mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
+                mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^ mac_array[4]  ^
+                mac_array[3]  ^ mac_array[2];
+
+       crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
+                mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
+                mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
+                mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
+                mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[5]  ^
+                mac_array[4]  ^ mac_array[3];
+
+       crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
+                mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
+                mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
+                mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
+                mac_array[12] ^ mac_array[10] ^ mac_array[6]  ^ mac_array[5]  ^
+                mac_array[4];
+
+       crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
+                mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
+                mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
+                mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
+                mac_array[11] ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[5];
+
+       for (i = 0; i < 8; i++)
+               crc_result = crc_result | (crc[i] << i);
+
+       table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
+       eth_port_set_filter_table_entry(table, crc_result);
+}
+
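A usage sketch of the two-table split described above; the port number and addresses are illustrative only. An address of the 01-00-5E-00-00-XX form is filed in the special table directly by its last byte, while any other multicast address is hashed through the CRC-8 into the other table:

        unsigned char special[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x05 };
        unsigned char other[6]   = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

        eth_port_mc_addr(0, special);   /* special table, entry 0x05 */
        eth_port_mc_addr(0, other);     /* other table, entry = CRC-8 of the MAC */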
+/*
+ * Set the entire multicast list based on dev->mc_list.
+ */
+static void eth_port_set_multicast_list(struct net_device *dev)
+{
+       struct dev_mc_list      *mc_list;
+       int                     i;
+       int                     table_index;
+       struct mv643xx_private  *mp = netdev_priv(dev);
+       unsigned int            eth_port_num = mp->port_num;
+
+       /* If the device is in promiscuous mode or in all multicast mode,
+        * we will fully populate both multicast tables with accept.
+        * This is guaranteed to yield a match on all multicast addresses...
+        */
+       if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
+               for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+                       /* Set all entries in DA filter special multicast
+                        * table (Ex_dFSMT)
+                        * Set for ETH_Q0 for now
+                        * Bits
+                        * 0     Accept=1, Drop=0
+                        * 3-1   Queue   ETH_Q0=0
+                        * 7-4   Reserved = 0;
+                        */
+                       mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                                       (eth_port_num) + table_index, 0x01010101);
+
+                       /* Set all entries in DA filter other multicast
+                        * table (Ex_dFOMT)
+                        * Set for ETH_Q0 for now
+                        * Bits
+                        * 0     Accept=1, Drop=0
+                        * 3-1   Queue   ETH_Q0=0
+                        * 7-4   Reserved = 0;
+                        */
+                       mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                                       (eth_port_num) + table_index, 0x01010101);
+               }
+               return;
+       }
+
+       /* We will clear out multicast tables every time we get the list.
+        * Then add the entire new list...
+        */
+       for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+               /* Clear DA filter special multicast table (Ex_dFSMT) */
+               mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                               (eth_port_num) + table_index, 0);
+
+               /* Clear DA filter other multicast table (Ex_dFOMT) */
+               mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                               (eth_port_num) + table_index, 0);
+       }
+
+       /* Get pointer to net_device multicast list and add each one... */
+       for (i = 0, mc_list = dev->mc_list;
+                       (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
+                       i++, mc_list = mc_list->next)
+               if (mc_list->dmi_addrlen == 6)
+                       eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
+}
+
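As a sanity check on the constants used above and in eth_port_init_mac_tables() below: the 0x00..0xFC stride-4 loop walks 64 registers, each holding four 8-bit entries, so each multicast table has 256 hash buckets, and 0x01010101 simply sets the Accept bit in all four entries of one register. The names below are made up for the illustration:

        enum {
                MCAST_TABLE_REGS    = (0xFC / 4) + 1,           /* 64 registers per table */
                MCAST_TABLE_ENTRIES = MCAST_TABLE_REGS * 4      /* 256 hash buckets */
        };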
 /*
  * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
  *
@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
 
        for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                /* Clear DA filter special multicast table (Ex_dFSMT) */
-               mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
-                                       (eth_port_num) + table_index), 0);
+               mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                                       (eth_port_num) + table_index, 0);
                /* Clear DA filter other multicast table (Ex_dFOMT) */
-               mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
-                                       (eth_port_num) + table_index), 0);
+               mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                                       (eth_port_num) + table_index, 0);
        }
 }
 
@@ -2489,6 +2617,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
        struct eth_tx_desc *current_descriptor;
        struct eth_tx_desc *first_descriptor;
        u32 command;
+       unsigned long flags;
 
        /* Do not process Tx ring in case of Tx ring resource error */
        if (mp->tx_resource_err)
@@ -2505,6 +2634,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
                return ETH_ERROR;
        }
 
+       spin_lock_irqsave(&mp->lock, flags);
+
        mp->tx_ring_skbs++;
        BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
 
@@ -2554,11 +2685,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
                mp->tx_resource_err = 1;
                mp->tx_curr_desc_q = tx_first_desc;
 
+               spin_unlock_irqrestore(&mp->lock, flags);
+
                return ETH_QUEUE_LAST_RESOURCE;
        }
 
        mp->tx_curr_desc_q = tx_next_desc;
 
+       spin_unlock_irqrestore(&mp->lock, flags);
+
        return ETH_OK;
 }
 #else
@@ -2569,11 +2704,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
        int tx_desc_used;
        struct eth_tx_desc *current_descriptor;
        unsigned int command_status;
+       unsigned long flags;
 
        /* Do not process Tx ring in case of Tx ring resource error */
        if (mp->tx_resource_err)
                return ETH_QUEUE_FULL;
 
+       spin_lock_irqsave(&mp->lock, flags);
+
        mp->tx_ring_skbs++;
        BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
 
@@ -2604,9 +2742,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
        /* Check for ring index overlap in the Tx desc ring */
        if (tx_desc_curr == tx_desc_used) {
                mp->tx_resource_err = 1;
+
+               spin_unlock_irqrestore(&mp->lock, flags);
                return ETH_QUEUE_LAST_RESOURCE;
        }
 
+       spin_unlock_irqrestore(&mp->lock, flags);
        return ETH_OK;
 }
 #endif
@@ -2629,23 +2770,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
  *     Tx ring 'first' and 'used' indexes are updated.
  *
  * RETURN:
- *     ETH_ERROR in case the routine can not access Tx desc ring.
- *     ETH_RETRY in case there is transmission in process.
- *     ETH_END_OF_JOB if the routine has nothing to release.
- *     ETH_OK otherwise.
+ *     ETH_OK on success
+ *     ETH_ERROR otherwise.
  *
  */
 static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
                                                struct pkt_info *p_pkt_info)
 {
        int tx_desc_used;
+       int tx_busy_desc;
+       struct eth_tx_desc *p_tx_desc_used;
+       unsigned int command_status;
+       unsigned long flags;
+       int err = ETH_OK;
+
+       spin_lock_irqsave(&mp->lock, flags);
+
 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
-       int tx_busy_desc = mp->tx_first_desc_q;
+       tx_busy_desc = mp->tx_first_desc_q;
 #else
-       int tx_busy_desc = mp->tx_curr_desc_q;
+       tx_busy_desc = mp->tx_curr_desc_q;
 #endif
-       struct eth_tx_desc *p_tx_desc_used;
-       unsigned int command_status;
 
        /* Get the Tx Desc ring indexes */
        tx_desc_used = mp->tx_used_desc_q;
@@ -2653,22 +2798,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
        p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
 
        /* Sanity check */
-       if (p_tx_desc_used == NULL)
-               return ETH_ERROR;
+       if (p_tx_desc_used == NULL) {
+               err = ETH_ERROR;
+               goto out;
+       }
 
        /* Stop release. About to overlap the current available Tx descriptor */
-       if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
-               return ETH_END_OF_JOB;
+       if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
+               err = ETH_ERROR;
+               goto out;
+       }
 
        command_status = p_tx_desc_used->cmd_sts;
 
        /* Still transmitting... */
-       if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
-               return ETH_RETRY;
+       if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+               err = ETH_ERROR;
+               goto out;
+       }
 
        /* Pass the packet information to the caller */
        p_pkt_info->cmd_sts = command_status;
        p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
+       p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
+       p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
        mp->tx_skb[tx_desc_used] = NULL;
 
        /* Update the next descriptor to release. */
@@ -2680,7 +2833,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
        BUG_ON(mp->tx_ring_skbs == 0);
        mp->tx_ring_skbs--;
 
-       return ETH_OK;
+out:
+       spin_unlock_irqrestore(&mp->lock, flags);
+
+       return err;
 }
 
 /*
@@ -2712,11 +2868,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
        volatile struct eth_rx_desc *p_rx_desc;
        unsigned int command_status;
+       unsigned long flags;
 
        /* Do not process Rx ring in case of Rx ring resource error */
        if (mp->rx_resource_err)
                return ETH_QUEUE_FULL;
 
+       spin_lock_irqsave(&mp->lock, flags);
+
        /* Get the Rx Desc ring 'curr and 'used' indexes */
        rx_curr_desc = mp->rx_curr_desc_q;
        rx_used_desc = mp->rx_used_desc_q;
@@ -2728,8 +2887,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        rmb();
 
        /* Nothing to receive... */
-       if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+       if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+               spin_unlock_irqrestore(&mp->lock, flags);
                return ETH_END_OF_JOB;
+       }
 
        p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
        p_pkt_info->cmd_sts = command_status;
@@ -2749,6 +2910,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        if (rx_next_curr_desc == rx_used_desc)
                mp->rx_resource_err = 1;
 
+       spin_unlock_irqrestore(&mp->lock, flags);
+
        return ETH_OK;
 }
 
@@ -2777,6 +2940,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
 {
        int used_rx_desc;       /* Where to return Rx resource */
        volatile struct eth_rx_desc *p_used_rx_desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mp->lock, flags);
 
        /* Get 'used' Rx descriptor */
        used_rx_desc = mp->rx_used_desc_q;
@@ -2800,6 +2966,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
        /* Any Rx return cancels the Rx resource error status */
        mp->rx_resource_err = 0;
 
+       spin_unlock_irqrestore(&mp->lock, flags);
+
        return ETH_OK;
 }
 
index b538e3038058a7afe022a83fbadf9876dac0ffbb..bf55a4cfb3d25e6a8c401f7518b98bb5e9b74f56 100644 (file)
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
+       if (sizeof(dma_addr_t) > sizeof(u32) &&
+           !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
                using_dac = 1;
-       else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
-               printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-                      pci_name(pdev));
-               goto err_out_free_regions;
+               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               if (err < 0) {
+                       printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+                              "for consistent allocations\n", pci_name(pdev));
+                       goto err_out_free_regions;
+               }
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               if (err) {
+                       printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+                              pci_name(pdev));
+                       goto err_out_free_regions;
+               }
        }
 
 #ifdef __BIG_ENDIAN
index f5d697c0c0315e6fa0594a4bb216a5ab89b234e1..f8b973a04b657adcaaaaaf4c8103c9ecea2ce4d2 100644 (file)
@@ -57,7 +57,7 @@
 #include "sky2.h"
 
 #define DRV_NAME               "sky2"
-#define DRV_VERSION            "0.11"
+#define DRV_VERSION            "0.13"
 #define PFX                    DRV_NAME " "
 
 /*
@@ -75,6 +75,7 @@
 #define RX_LE_BYTES            (RX_LE_SIZE*sizeof(struct sky2_rx_le))
 #define RX_MAX_PENDING         (RX_LE_SIZE/2 - 2)
 #define RX_DEF_PENDING         RX_MAX_PENDING
+#define RX_SKB_ALIGN           8
 
 #define TX_RING_SIZE           512
 #define TX_DEF_PENDING         (TX_RING_SIZE - 1)
@@ -91,7 +92,7 @@
 static const u32 default_msg =
     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
     | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
-    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
 
 static int debug = -1;         /* defaults above */
 module_param(debug, int, 0);
@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 
 }
 
-static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
+/* Assign Ram Buffer allocation.
+ * start and end are in units of 4k bytes
+ * ram registers are in units of 64bit words
+ */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
 {
-       u32 end;
+       u32 start, end;
 
-       start /= 8;
-       len /= 8;
-       end = start + len - 1;
+       start = startk * 4096/8;
+       end = (endk * 4096/8) - 1;
 
        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
        sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
        sky2_write32(hw, RB_ADDR(q, RB_RP), start);
 
        if (q == Q_R1 || q == Q_R2) {
-               u32 rxup, rxlo;
+               u32 space = (endk - startk) * 4096/8;
+               u32 tp = space - space/4;
 
-               rxlo = len/2;
-               rxup = rxlo + len/4;
+               /* On the receive queues, set the thresholds:
+                * give the receiver priority when > 3/4 full,
+                * send pause when down to 2K
+                */
+               sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+               sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
 
-               /* Set thresholds on receive queue's */
-               sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
-               sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
+               tp = space - 2048/8;
+               sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+               sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
        } else {
                /* Enable store & forward on Tx queue's because
                 * Tx FIFO is only 1K on Yukon
@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
  * This is a workaround code taken from SysKonnect sk98lin driver
  * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
  */
-static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
+static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
                                u16 idx, u16 *last, u16 size)
 {
+       wmb();
        if (is_ec_a1(hw) && idx < *last) {
                u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
 
@@ -721,6 +731,7 @@ setnew:
                sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
        }
        *last = idx;
+       mmiowb();
 }
 
 
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
 /* Return high part of DMA address (could be 32 or 64 bit) */
 static inline u32 high32(dma_addr_t a)
 {
-       return (a >> 16) >> 16;
+       return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
 }
 
 /* Build description to hardware about buffer */
-static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
+static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
 {
        struct sky2_rx_le *le;
        u32 hi = high32(map);
@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
        struct sky2_hw *hw = sky2->hw;
        u16 port = sky2->port;
 
-       spin_lock(&sky2->tx_lock);
+       spin_lock_bh(&sky2->tx_lock);
 
        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
        sky2->vlgrp = grp;
 
-       spin_unlock(&sky2->tx_lock);
+       spin_unlock_bh(&sky2->tx_lock);
 }
 
 static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        struct sky2_hw *hw = sky2->hw;
        u16 port = sky2->port;
 
-       spin_lock(&sky2->tx_lock);
+       spin_lock_bh(&sky2->tx_lock);
 
        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
        if (sky2->vlgrp)
                sky2->vlgrp->vlan_devices[vid] = NULL;
 
-       spin_unlock(&sky2->tx_lock);
+       spin_unlock_bh(&sky2->tx_lock);
 }
 #endif
 
+/*
+ * It appears the hardware has a bug in the FIFO logic that
+ * causes it to hang if the FIFO gets overrun and the receive buffer
+ * is not aligned.  Also, alloc_skb() won't align properly if slab
+ * debugging is enabled.
+ */
+static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+       if (likely(skb)) {
+               unsigned long p = (unsigned long) skb->data;
+               skb_reserve(skb,
+                       ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
+       }
+
+       return skb;
+}
+
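A worked example of the rounding in sky2_alloc_skb(), with an address chosen for illustration: if alloc_skb() returns data at 0x1013, the expression below rounds up to 0x1018, so skb_reserve() consumes 5 bytes and the buffer handed to the chip starts 8-byte aligned; the RX_SKB_ALIGN bytes of extra allocation guarantee the reserve never runs past the end of the buffer.

        unsigned long p = 0x1013;       /* example skb->data */
        unsigned long pad = ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p;        /* 5 */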
 /*
  * Allocate and setup receiver buffer pool.
  * In case of 64 bit dma, there are 2X as many list elements
  * available as ring entries
  * and need to reserve one list element so we don't wrap around.
- *
- * It appears the hardware has a bug in the FIFO logic that
- * cause it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned.  This means we can't use skb_reserve to align
- * the IP header.
  */
 static int sky2_rx_start(struct sky2_port *sky2)
 {
@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
        for (i = 0; i < sky2->rx_pending; i++) {
                struct ring_info *re = sky2->rx_ring + i;
 
-               re->skb = dev_alloc_skb(sky2->rx_bufsize);
+               re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
                if (!re->skb)
                        goto nomem;
 
@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
 
        sky2_mac_init(hw, port);
 
-       /* Configure RAM buffers */
-       if (hw->chip_id == CHIP_ID_YUKON_FE ||
-           (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
-               ramsize = 4096;
-       else {
-               u8 e0 = sky2_read8(hw, B2_E_0);
-               ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
-       }
+       /* Determine available ram buffer space (in 4K blocks).
+        * Note: not sure about the FE setting below yet
+        */
+       if (hw->chip_id == CHIP_ID_YUKON_FE)
+               ramsize = 4;
+       else
+               ramsize = sky2_read8(hw, B2_E_0);
+
+       /* Give transmitter one third (rounded up) */
+       rxspace = ramsize - (ramsize + 2) / 3;
 
-       /* 2/3 for Rx */
-       rxspace = (2 * ramsize) / 3;
        sky2_ramset(hw, rxqaddr[port], 0, rxspace);
-       sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+       sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
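
The split above hands the transmitter one third of the RAM blocks, rounded up, and leaves the rest to the receiver: tx = (ramsize + 2) / 3, rx = ramsize - tx. A quick arithmetic sketch with made-up block counts:

#include <stdio.h>

/* one third of the RAM blocks (rounded up) for tx, the rest for rx;
 * mirrors "rxspace = ramsize - (ramsize + 2) / 3" above */
int main(void)
{
        unsigned sizes[] = { 4, 16, 24, 32 };   /* illustrative block counts */
        unsigned ramsize, rxspace, txspace, i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                ramsize = sizes[i];
                txspace = (ramsize + 2) / 3;
                rxspace = ramsize - txspace;
                printf("ramsize=%2u -> rx=%2u tx=%2u\n",
                       ramsize, rxspace, txspace);
        }
        return 0;
}
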
 
        /* Make sure SyncQ is disabled */
        sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
 }
 
 /* Estimate of number of transmit list elements required */
-static inline unsigned tx_le_req(const struct sk_buff *skb)
+static unsigned tx_le_req(const struct sk_buff *skb)
 {
        unsigned count;
 
@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        u16 mss;
        u8 ctrl;
 
+       /* No BH disabling for tx_lock here.  We are running in BH disabled
+        * context, TX reclaim runs via poll inside a software
+        * interrupt, and no related locks are taken in hard IRQ context.
+        */
        if (!spin_trylock(&sky2->tx_lock))
                return NETDEV_TX_LOCKED;
 
@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                 */
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
-                       printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-                              dev->name);
+                       if (net_ratelimit())
+                               printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
+                                      dev->name);
                }
                spin_unlock(&sky2->tx_lock);
 
@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
                mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
-               addr64 = (mapping >> 16) >> 16;
+               addr64 = high32(mapping);
                if (addr64 != sky2->tx_addr64) {
                        le = get_tx_le(sky2);
                        le->tx.addr = cpu_to_le32(addr64);
@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
 
 out_unlock:
-       mmiowb();
        spin_unlock(&sky2->tx_lock);
 
        dev->trans_start = jiffies;
@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                dev_kfree_skb_any(skb);
        }
 
-       spin_lock(&sky2->tx_lock);
        sky2->tx_cons = put;
        if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
                netif_wake_queue(dev);
-       spin_unlock(&sky2->tx_lock);
 }
 
 /* Cleanup all untransmitted buffers, assume transmitter not running */
 static void sky2_tx_clean(struct sky2_port *sky2)
 {
+       spin_lock_bh(&sky2->tx_lock);
        sky2_tx_complete(sky2, sky2->tx_prod);
+       spin_unlock_bh(&sky2->tx_lock);
 }
 
 /* Network shutdown */
@@ -1582,28 +1612,40 @@ out:
        local_irq_enable();
 }
 
+
+/* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
 static void sky2_tx_timeout(struct net_device *dev)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
        unsigned txq = txqaddr[sky2->port];
+       u16 ridx;
+
+       /* Maybe we just missed a status interrupt */
+       spin_lock(&sky2->tx_lock);
+       ridx = sky2_read16(hw,
+                          sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
+       sky2_tx_complete(sky2, ridx);
+       spin_unlock(&sky2->tx_lock);
+
+       if (!netif_queue_stopped(dev)) {
+               if (net_ratelimit())
+                       pr_info(PFX "transmit interrupt missed? recovered\n");
+               return;
+       }
 
        if (netif_msg_timer(sky2))
                printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
 
-       netif_stop_queue(dev);
-
        sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
-       sky2_read32(hw, Q_ADDR(txq, Q_CSR));
-
        sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
 
        sky2_tx_clean(sky2);
 
        sky2_qset(hw, txq);
        sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
-
-       netif_wake_queue(dev);
 }
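
The timeout handler above now reads the transmit ring index the status unit last reported (STAT_TXA1_RIDX / STAT_TXA2_RIDX) and completes descriptors up to it, so a merely missed status interrupt is recovered without resetting the queue. A much simplified userspace stand-in for that "complete up to a reported index" step; ring size and indices are illustrative:

#include <stdio.h>

#define RING_SIZE 8     /* illustrative; the real TX ring is larger */

static unsigned cons;   /* next entry to reclaim (like sky2->tx_cons) */

/* reclaim every entry from cons up to (but not including) the index the
 * hardware reports as done, handling wraparound */
static void tx_complete(unsigned done)
{
        while (cons != done) {
                printf("reclaim slot %u\n", cons);
                cons = (cons + 1) % RING_SIZE;
        }
}

int main(void)
{
        tx_complete(5); /* hardware says it finished up to slot 5 */
        tx_complete(2); /* ...later, wrapped past the end of the ring */
        return 0;
}
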
 
 
@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
        } else {
                struct sk_buff *nskb;
 
-               nskb = dev_alloc_skb(sky2->rx_bufsize);
+               nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
                if (!nskb)
                        goto resubmit;
 
@@ -1745,7 +1787,7 @@ oversize:
 error:
        ++sky2->net_stats.rx_errors;
 
-       if (netif_msg_rx_err(sky2))
+       if (netif_msg_rx_err(sky2) && net_ratelimit())
                printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
                       sky2->netdev->name, status, length);
 
@@ -1766,13 +1808,16 @@ error:
  */
 #define TX_NO_STATUS   0xffff
 
-static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
+static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
 {
        if (last != TX_NO_STATUS) {
                struct net_device *dev = hw->dev[port];
                if (dev && netif_running(dev)) {
                        struct sky2_port *sky2 = netdev_priv(dev);
+
+                       spin_lock(&sky2->tx_lock);
                        sky2_tx_complete(sky2, last);
+                       spin_unlock(&sky2->tx_lock);
                }
        }
 }
@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                struct sk_buff *skb;
                u32 status;
                u16 length;
-               u8 op;
 
                le = hw->st_le + hw->st_idx;
                hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                sky2 = netdev_priv(dev);
                status = le32_to_cpu(le->status);
                length = le16_to_cpu(le->length);
-               op = le->opcode & ~HW_OWNER;
-               le->opcode = 0;
 
-               switch (op) {
+               switch (le->opcode & ~HW_OWNER) {
                case OP_RXSTAT:
                        skb = sky2_receive(sky2, length, status);
                        if (!skb)
@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING PFX
-                                      "unknown status opcode 0x%x\n", op);
+                                      "unknown status opcode 0x%x\n", le->opcode);
                        break;
                }
        }
 
 exit_loop:
        sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-       mmiowb();
 
        sky2_tx_check(hw, 0, tx_done[0]);
        sky2_tx_check(hw, 1, tx_done[1]);
@@ -1887,7 +1928,6 @@ exit_loop:
                netif_rx_complete(dev0);
                hw->intr_mask |= Y2_IS_STAT_BMU;
                sky2_write32(hw, B0_IMSK, hw->intr_mask);
-               mmiowb();
                return 0;
        } else {
                *budget -= work_done;
@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
 {
        struct net_device *dev = hw->dev[port];
 
-       printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
-              dev->name, status);
+       if (net_ratelimit())
+               printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
+                      dev->name, status);
 
        if (status & Y2_IS_PAR_RD1) {
-               printk(KERN_ERR PFX "%s: ram data read parity error\n",
-                      dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: ram data read parity error\n",
+                              dev->name);
                /* Clear IRQ */
                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
        }
 
        if (status & Y2_IS_PAR_WR1) {
-               printk(KERN_ERR PFX "%s: ram data write parity error\n",
-                      dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: ram data write parity error\n",
+                              dev->name);
 
                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
        }
 
        if (status & Y2_IS_PAR_MAC1) {
-               printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
                sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
        }
 
        if (status & Y2_IS_PAR_RX1) {
-               printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
                sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
        }
 
        if (status & Y2_IS_TCP_TXA1) {
-               printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: TCP segmentation error\n",
+                              dev->name);
                sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
        }
 }
@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
                u16 pci_err;
 
                pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
-               printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
-                      pci_name(hw->pdev), pci_err);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
+                              pci_name(hw->pdev), pci_err);
 
                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
                pci_write_config_word(hw->pdev, PCI_STATUS,
@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 
                pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
 
-               printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
-                      pci_name(hw->pdev), pex_err);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
+                              pci_name(hw->pdev), pex_err);
 
                /* clear the interrupt */
                sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
        return 0;
 }
 
-static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
+static u32 sky2_supported_modes(const struct sky2_hw *hw)
 {
        u32 modes;
        if (hw->copper) {
@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
        return dev;
 }
 
-static inline void sky2_show_addr(struct net_device *dev)
+static void __devinit sky2_show_addr(struct net_device *dev)
 {
        const struct sky2_port *sky2 = netdev_priv(dev);
 
@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
                goto err_out_free_regions;
        }
 
-       if (sizeof(dma_addr_t) > sizeof(u32)) {
-               err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
-               if (!err)
-                       using_dac = 1;
-       }
+       if (sizeof(dma_addr_t) > sizeof(u32) &&
+           !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+               using_dac = 1;
+               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               if (err < 0) {
+                       printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+                              "for consistent allocations\n", pci_name(pdev));
+                       goto err_out_free_regions;
+               }
 
-       if (!using_dac) {
+       } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        printk(KERN_ERR PFX "%s no usable DMA configuration\n",
@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
                        goto err_out_free_regions;
                }
        }
+
 #ifdef __BIG_ENDIAN
        /* byte swap descriptors in hardware */
        {
@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 #endif
 
        err = -ENOMEM;
-       hw = kmalloc(sizeof(*hw), GFP_KERNEL);
+       hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw) {
                printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
                       pci_name(pdev));
                goto err_out_free_regions;
        }
 
-       memset(hw, 0, sizeof(*hw));
        hw->pdev = pdev;
 
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
index 0d765f1733b5cd637e720f2c16369573b4762caf..1f5975a61e1f1526d8a43869c760ccd96e47753b 100644 (file)
@@ -22,7 +22,6 @@
  */
 
 #include <linux/config.h>
-
 #include <linux/compiler.h>
 #include <linux/crc32.h>
 #include <linux/delay.h>
@@ -30,6 +29,7 @@
 #include <linux/ethtool.h>
 #include <linux/firmware.h>
 #include <linux/if_vlan.h>
+#include <linux/in.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/tcp.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
        writel(value, card->regs + reg);
 }
 
-/**
- * spider_net_write_reg_sync - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- *
- * Unlike spider_net_write_reg, this will also make sure the
- * data arrives on the card by reading the reg again.
- */
-static void
-spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
-{
-       value = cpu_to_le32(value);
-       writel(value, card->regs + reg);
-       (void)readl(card->regs + reg);
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
-       u32 regvalue;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue &= ~SPIDER_NET_RXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
 /** spider_net_write_phy - write to phy register
  * @netdev: adapter to be written to
  * @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
 }
 
 /**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
-       u32 regvalue;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue |= SPIDER_NET_RXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
-/**
- * spider_net_tx_irq_off - switch off tx irq on this spider card
+ * spider_net_rx_irq_off - switch off rx irq on this spider card
  * @card: device structure
  *
- * switches off tx irq by masking them out in the GHIINTnMSK register
+ * switches off rx irq by masking them out in the GHIINTnMSK register
  */
 static void
-spider_net_tx_irq_off(struct spider_net_card *card)
+spider_net_rx_irq_off(struct spider_net_card *card)
 {
        u32 regvalue;
-       unsigned long flags;
 
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue &= ~SPIDER_NET_TXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
+       regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
+       spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
 }
 
 /**
- * spider_net_tx_irq_on - switch on tx irq on this spider card
+ * spider_net_rx_irq_on - switch on rx irq on this spider card
  * @card: device structure
  *
- * switches on tx irq by enabling them in the GHIINTnMSK register
+ * switches on rx irq by enabling them in the GHIINTnMSK register
  */
 static void
-spider_net_tx_irq_on(struct spider_net_card *card)
+spider_net_rx_irq_on(struct spider_net_card *card)
 {
        u32 regvalue;
-       unsigned long flags;
 
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue |= SPIDER_NET_TXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
+       regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
+       spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
 }
 
 /**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
 spider_net_get_descr_status(struct spider_net_descr *descr)
 {
        u32 cmd_status;
-       rmb();
+
        cmd_status = descr->dmac_cmd_status;
-       rmb();
        cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
        /* no need to mask out any bits, as cmd_status is 32 bits wide only
         * (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
 {
        u32 cmd_status;
        /* read the status */
-       mb();
        cmd_status = descr->dmac_cmd_status;
        /* clean the upper 4 bits */
        cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
        cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
        /* and write it back */
        descr->dmac_cmd_status = cmd_status;
-       wmb();
 }
 
 /**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
 {
        int i;
        struct spider_net_descr *descr;
+       dma_addr_t buf;
 
-       spin_lock_init(&card->chain_lock);
+       atomic_set(&card->rx_chain_refill, 0);
 
        descr = start_descr;
        memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
        for (i=0; i<no; i++, descr++) {
                spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
 
-               descr->bus_addr =
-                       pci_map_single(card->pdev, descr,
-                                      SPIDER_NET_DESCR_SIZE,
-                                      PCI_DMA_BIDIRECTIONAL);
+               buf = pci_map_single(card->pdev, descr,
+                                    SPIDER_NET_DESCR_SIZE,
+                                    PCI_DMA_BIDIRECTIONAL);
 
-               if (descr->bus_addr == DMA_ERROR_CODE)
+               if (buf == DMA_ERROR_CODE)
                        goto iommu_error;
 
+               descr->bus_addr = buf;
                descr->next = descr + 1;
                descr->prev = descr - 1;
 
@@ -439,7 +375,8 @@ iommu_error:
        for (i=0; i < no; i++, descr++)
                if (descr->bus_addr)
                        pci_unmap_single(card->pdev, descr->bus_addr,
-                                        SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
+                                        SPIDER_NET_DESCR_SIZE,
+                                        PCI_DMA_BIDIRECTIONAL);
        return -ENOMEM;
 }
 
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
                if (descr->skb) {
                        dev_kfree_skb(descr->skb);
                        pci_unmap_single(card->pdev, descr->buf_addr,
-                                        SPIDER_NET_MAX_MTU,
+                                        SPIDER_NET_MAX_FRAME,
                                         PCI_DMA_BIDIRECTIONAL);
                }
                descr = descr->next;
@@ -480,12 +417,13 @@ static int
 spider_net_prepare_rx_descr(struct spider_net_card *card,
                            struct spider_net_descr *descr)
 {
+       dma_addr_t buf;
        int error = 0;
        int offset;
        int bufsize;
 
        /* we need to round up the buffer size to a multiple of 128 */
-       bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
+       bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
                (~(SPIDER_NET_RXBUF_ALIGN - 1));
 
        /* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
        /* allocate an skb */
        descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
        if (!descr->skb) {
-               if (net_ratelimit())
-                       if (netif_msg_rx_err(card))
-                               pr_err("Not enough memory to allocate "
-                                       "rx buffer\n");
+               if (netif_msg_rx_err(card) && net_ratelimit())
+                       pr_err("Not enough memory to allocate rx buffer\n");
                return -ENOMEM;
        }
        descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
        if (offset)
                skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
        /* io-mmu-map the skb */
-       descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
-                                        SPIDER_NET_MAX_MTU,
-                                        PCI_DMA_BIDIRECTIONAL);
-       if (descr->buf_addr == DMA_ERROR_CODE) {
+       buf = pci_map_single(card->pdev, descr->skb->data,
+                            SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+       descr->buf_addr = buf;
+       if (buf == DMA_ERROR_CODE) {
                dev_kfree_skb_any(descr->skb);
-               if (netif_msg_rx_err(card))
+               if (netif_msg_rx_err(card) && net_ratelimit())
                        pr_err("Could not iommu-map rx buffer\n");
                spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
        } else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 }
 
 /**
- * spider_net_enable_rxctails - sets RX dmac chain tail addresses
+ * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
  * @card: card structure
  *
- * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the
+ * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
  * chip by writing to the appropriate register. DMA is enabled in
  * spider_net_enable_rxdmac.
  */
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
 static void
 spider_net_enable_rxdmac(struct spider_net_card *card)
 {
+       wmb();
        spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
                             SPIDER_NET_DMA_RX_VALUE);
 }
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
  * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
  * @card: card structure
  *
- * refills descriptors in all chains (last used chain first): allocates skbs
- * and iommu-maps them.
+ * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
  */
 static void
 spider_net_refill_rx_chain(struct spider_net_card *card)
 {
        struct spider_net_descr_chain *chain;
-       int count = 0;
-       unsigned long flags;
 
        chain = &card->rx_chain;
 
-       spin_lock_irqsave(&card->chain_lock, flags);
-       while (spider_net_get_descr_status(chain->head) ==
-                               SPIDER_NET_DESCR_NOT_IN_USE) {
-               if (spider_net_prepare_rx_descr(card, chain->head))
-                       break;
-               count++;
-               chain->head = chain->head->next;
-       }
-       spin_unlock_irqrestore(&card->chain_lock, flags);
+       /* one context doing the refill (and a second context seeing that
+        * and skipping it) is ok. If called by NAPI, we'll be called again
+        * as spider_net_decode_one_descr is called several times. If some
+        * interrupt calls us, the NAPI is about to clean up anyway. */
+       if (atomic_inc_return(&card->rx_chain_refill) == 1)
+               while (spider_net_get_descr_status(chain->head) ==
+                      SPIDER_NET_DESCR_NOT_IN_USE) {
+                       if (spider_net_prepare_rx_descr(card, chain->head))
+                               break;
+                       chain->head = chain->head->next;
+               }
 
-       /* could be optimized, only do that, if we know the DMA processing
-        * has terminated */
-       if (count)
-               spider_net_enable_rxdmac(card);
+       atomic_dec(&card->rx_chain_refill);
 }
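
The chain_lock is gone; an atomic counter now ensures at most one context performs the refill while any concurrent caller simply backs off. A minimal sketch of that guard, with GCC __sync builtins standing in for the kernel's atomic_inc_return()/atomic_dec():

#include <stdio.h>

static int refill_guard;        /* plays the role of card->rx_chain_refill */

static void refill_rx_chain(void)
{
        /* only the caller that takes the counter from 0 to 1 does the work;
         * anyone who sees a higher value knows a refill is already running */
        if (__sync_add_and_fetch(&refill_guard, 1) == 1)
                printf("refilling descriptors\n");
        else
                printf("refill already in progress, skipping\n");

        __sync_sub_and_fetch(&refill_guard, 1);
}

int main(void)
{
        refill_rx_chain();
        return 0;
}
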
 
 /**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
        /* this will allocate the rest of the rx buffers; if not, it's
         * business as usual later on */
        spider_net_refill_rx_chain(card);
+       spider_net_enable_rxdmac(card);
        return 0;
 
 error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
  * @card: adapter structure
  * @brutal: if set, don't care about whether descriptor seems to be in use
  *
- * releases the tx descriptors that spider has finished with (if non-brutal)
- * or simply release tx descriptors (if brutal)
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not lose initiative.
  */
-static void
+static int
 spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
 {
        struct spider_net_descr_chain *tx_chain = &card->tx_chain;
        enum spider_net_descr_status status;
 
-       spider_net_tx_irq_off(card);
+       if (atomic_inc_return(&card->tx_chain_release) != 1) {
+               atomic_dec(&card->tx_chain_release);
+               return 1;
+       }
 
-       /* no lock for chain needed, if this is only executed once at a time */
-again:
        for (;;) {
                status = spider_net_get_descr_status(tx_chain->tail);
                switch (status) {
                case SPIDER_NET_DESCR_CARDOWNED:
-                       if (!brutal) goto out;
+                       if (!brutal)
+                               goto out;
                        /* fallthrough, if we release the descriptors
                         * brutally (then we don't care about
                         * SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
                tx_chain->tail = tx_chain->tail->next;
        }
 out:
+       atomic_dec(&card->tx_chain_release);
+
        netif_wake_queue(card->netdev);
 
-       if (!brutal) {
-               /* switch on tx irqs (while we are still in the interrupt
-                * handler, so we don't get an interrupt), check again
-                * for done descriptors. This results in fewer interrupts */
-               spider_net_tx_irq_on(card);
-               status = spider_net_get_descr_status(tx_chain->tail);
-               switch (status) {
-                       case SPIDER_NET_DESCR_RESPONSE_ERROR:
-                       case SPIDER_NET_DESCR_PROTECTION_ERROR:
-                       case SPIDER_NET_DESCR_FORCE_END:
-                       case SPIDER_NET_DESCR_COMPLETE:
-                               goto again;
-                       default:
-                               break;
-               }
-       }
+       if (status == SPIDER_NET_DESCR_CARDOWNED)
+               return 1;
+       return 0;
+}
 
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to clean up our TX ring) and returns sent packets to the stack
+ * by freeing them.
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+       if ( (spider_net_release_tx_chain(card, 0)) &&
+             (card->netdev->flags & IFF_UP) ) {
+               mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+       }
 }
 
 /**
@@ -726,16 +671,22 @@ out:
 static u8
 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
 {
-       /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
-        * ff:ff:ff:ff:ff:ff must result in 0xfd */
        u32 crc;
        u8 hash;
+       char addr_for_crc[ETH_ALEN] = { 0, };
+       int i, bit;
 
-       crc = crc32_be(~0, addr, netdev->addr_len);
+       for (i = 0; i < ETH_ALEN * 8; i++) {
+               bit = (addr[i / 8] >> (i % 8)) & 1;
+               addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
+       }
+
+       crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
 
        hash = (crc >> 27);
        hash <<= 3;
        hash |= crc & 7;
+       hash &= 0xff;
 
        return hash;
 }
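
The loop above reverses both the byte order and the bit order of the MAC address before feeding it to crc32_be(), and the hash is bits 31..27 of the CRC followed by bits 2..0. A self-contained sketch of the same computation; the bit-at-a-time CRC below is only a stand-in for the kernel's crc32_be() and assumes the usual 0x04c11db7 polynomial:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* bit-at-a-time big-endian CRC-32, a stand-in for the kernel's crc32_be() */
static uint32_t crc32_be_sketch(uint32_t crc, const uint8_t *p, int len)
{
        int i;

        while (len--) {
                crc ^= (uint32_t)*p++ << 24;
                for (i = 0; i < 8; i++)
                        crc = (crc << 1) ^ ((crc & 0x80000000) ? 0x04c11db7 : 0);
        }
        return crc;
}

static uint8_t multicast_hash(const uint8_t *addr)
{
        uint8_t addr_for_crc[ETH_ALEN] = { 0, };
        uint32_t crc;
        uint8_t hash;
        int i, bit;

        /* reverse byte order and bit order, as in the driver */
        for (i = 0; i < ETH_ALEN * 8; i++) {
                bit = (addr[i / 8] >> (i % 8)) & 1;
                addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
        }

        crc = crc32_be_sketch(~0u, addr_for_crc, ETH_ALEN);

        hash = (crc >> 27);
        hash <<= 3;
        hash |= crc & 7;
        return hash;
}

int main(void)
{
        const uint8_t mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("hash = 0x%02x\n", multicast_hash(mc));
        return 0;
}
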
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
 {
        struct spider_net_card *card = netdev_priv(netdev);
 
+       tasklet_kill(&card->rxram_full_tl);
        netif_poll_disable(netdev);
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
+       del_timer_sync(&card->tx_timer);
 
        /* disable/mask all interrupts */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
  * @skb: packet to consider
  *
  * fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings. This function assumes a wmb()
- * has executed before.
+ * depending on hardware checksum settings.
  */
 static void
 spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
                               struct sk_buff *skb)
 {
+       /* make sure the other fields in the descriptor are written */
+       wmb();
+
        if (skb->ip_summed != CHECKSUM_HW) {
                descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
                return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
        /* is packet ip?
         * if yes: tcp? udp? */
        if (skb->protocol == htons(ETH_P_IP)) {
-               if (skb->nh.iph->protocol == IPPROTO_TCP) {
+               if (skb->nh.iph->protocol == IPPROTO_TCP)
                        descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
-               } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+               else if (skb->nh.iph->protocol == IPPROTO_UDP)
                        descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
-               } else { /* the stack should checksum non-tcp and non-udp
-                           packets on his own: NETIF_F_IP_CSUM */
+               else /* the stack should checksum non-tcp and non-udp
+                       packets on its own: NETIF_F_IP_CSUM */
                        descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
-               }
        }
 }
 
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
                            struct spider_net_descr *descr,
                            struct sk_buff *skb)
 {
-       descr->buf_addr = pci_map_single(card->pdev, skb->data,
-                                        skb->len, PCI_DMA_BIDIRECTIONAL);
-       if (descr->buf_addr == DMA_ERROR_CODE) {
-               if (netif_msg_tx_err(card))
+       dma_addr_t buf;
+
+       buf = pci_map_single(card->pdev, skb->data,
+                            skb->len, PCI_DMA_BIDIRECTIONAL);
+       if (buf == DMA_ERROR_CODE) {
+               if (netif_msg_tx_err(card) && net_ratelimit())
                        pr_err("could not iommu-map packet (%p, %i). "
                                  "Dropping packet\n", skb->data, skb->len);
                return -ENOMEM;
        }
 
+       descr->buf_addr = buf;
        descr->buf_size = skb->len;
        descr->skb = skb;
        descr->data_status = 0;
 
-       /* make sure the above values are in memory before we change the
-        * status */
-       wmb();
-
        spider_net_set_txdescr_cmdstat(descr,skb);
 
        return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct spider_net_descr *descr;
        int result;
 
-       descr = spider_net_get_next_tx_descr(card);
+       spider_net_release_tx_chain(card, 0);
 
-       if (!descr) {
-               netif_stop_queue(netdev);
+       descr = spider_net_get_next_tx_descr(card);
 
-               descr = spider_net_get_next_tx_descr(card);
-               if (!descr)
-                       goto error;
-               else
-                       netif_start_queue(netdev);
-       }
+       if (!descr)
+               goto error;
 
        result = spider_net_prepare_tx_descr(card, descr, skb);
        if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        card->tx_chain.head = card->tx_chain.head->next;
 
-       /* make sure the status from spider_net_prepare_tx_descr is in
-        * memory before we check out the previous descriptor */
-       wmb();
-
        if (spider_net_get_descr_status(descr->prev) !=
-           SPIDER_NET_DESCR_CARDOWNED)
-               spider_net_kick_tx_dma(card, descr);
+           SPIDER_NET_DESCR_CARDOWNED) {
+               /* make sure the current descriptor is in memory. Kicking
+                * the DMA on again only makes sense if the previous one is
+                * no longer card-owned. Check the previous descriptor twice
+                * to avoid an mb() in heavy traffic cases */
+               mb();
+               if (spider_net_get_descr_status(descr->prev) !=
+                   SPIDER_NET_DESCR_CARDOWNED)
+                       spider_net_kick_tx_dma(card, descr);
+       }
+
+       mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
 
        return NETDEV_TX_OK;
 
 error:
        card->netdev_stats.tx_dropped++;
-       return NETDEV_TX_LOCKED;
+       return NETDEV_TX_BUSY;
 }
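
The transmit path above checks the previous descriptor twice: only when the first, unordered read suggests the DMA engine may have stopped does it pay for a full memory barrier and re-check before kicking the hardware, so the common heavy-traffic case (previous descriptor still card-owned) costs no barrier at all. A minimal sketch of that check, barrier, re-check pattern; __sync_synchronize() is just a stand-in for the kernel's mb(), and this single-threaded demo cannot show the real ordering effect:

#include <stdio.h>

#define CARDOWNED 1
#define COMPLETE  0

static volatile int prev_status = COMPLETE;  /* previous descriptor's status */

static void kick_tx_dma(void)
{
        printf("kick DMA engine\n");
}

static void maybe_kick(void)
{
        if (prev_status != CARDOWNED) {
                /* cheap first check said the engine may be idle; pay for a
                 * full barrier and re-check before actually kicking it */
                __sync_synchronize();
                if (prev_status != CARDOWNED)
                        kick_tx_dma();
        }
}

int main(void)
{
        maybe_kick();
        return 0;
}
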
 
 /**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
  * @descr: descriptor to process
  * @card: card structure
+ * @napi: whether caller is in NAPI context
  *
  * returns 1 on success, 0 if no packet was passed to the stack
  *
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  */
 static int
 spider_net_pass_skb_up(struct spider_net_descr *descr,
-                      struct spider_net_card *card)
+                      struct spider_net_card *card, int napi)
 {
        struct sk_buff *skb;
        struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 
        netdev = card->netdev;
 
-       /* check for errors in the data_error flag */
-       if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
-           netif_msg_rx_err(card))
-               pr_err("error in received descriptor found, "
-                      "data_status=x%08x, data_error=x%08x\n",
-                      data_status, data_error);
-
-       /* prepare skb, unmap descriptor */
-       skb = descr->skb;
-       pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
+       /* unmap descriptor */
+       pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
                         PCI_DMA_BIDIRECTIONAL);
 
        /* the cases we'll throw away the packet immediately */
-       if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
+       if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+               if (netif_msg_rx_err(card))
+                       pr_err("error in received descriptor found, "
+                              "data_status=x%08x, data_error=x%08x\n",
+                              data_status, data_error);
                return 0;
+       }
 
+       skb = descr->skb;
        skb->dev = netdev;
        skb_put(skb, descr->valid_size);
 
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 
        /* checksum offload */
        if (card->options.rx_csum) {
-               if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
-                    (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
+               if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
+                      SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
+                    !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;
-       } else {
+       } else
                skb->ip_summed = CHECKSUM_NONE;
-       }
 
        if (data_status & SPIDER_NET_VLAN_PACKET) {
                /* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
        }
 
        /* pass skb up to stack */
-       netif_receive_skb(skb);
+       if (napi)
+               netif_receive_skb(skb);
+       else
+               netif_rx_ni(skb);
 
        /* update netdevice statistics */
        card->netdev_stats.rx_packets++;
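
With the new masks, an skb is marked CHECKSUM_UNNECESSARY only when both the IP and TCP/UDP "checksum checked" status bits are set and no IP checksum error is flagged; everything else falls back to CHECKSUM_NONE. A small sketch of that decision, using the bit positions from the spider_net.h changes further down; the status values in main() are made up:

#include <stdio.h>
#include <stdint.h>

#define RX_IPCHK        29
#define RX_TCPCHK       28
#define RX_IPCHKERR     27

#define STATUS_CKSUM_MASK       ((1u << RX_IPCHK) | (1u << RX_TCPCHK))
#define ERR_CKSUM_MASK          (1u << RX_IPCHKERR)

/* returns 1 when the hardware checksum result can be trusted
 * (CHECKSUM_UNNECESSARY), 0 otherwise (CHECKSUM_NONE) */
static int rx_csum_ok(uint32_t data_status, uint32_t data_error)
{
        return (data_status & STATUS_CKSUM_MASK) == STATUS_CKSUM_MASK &&
               !(data_error & ERR_CKSUM_MASK);
}

int main(void)
{
        /* both check bits set, no error flagged -> trusted */
        printf("%d\n", rx_csum_ok(STATUS_CKSUM_MASK, 0));
        /* only the IP check bit set -> not trusted */
        printf("%d\n", rx_csum_ok(1u << RX_IPCHK, 0));
        /* both bits set but IP checksum error flagged -> not trusted */
        printf("%d\n", rx_csum_ok(STATUS_CKSUM_MASK, ERR_CKSUM_MASK));
        return 0;
}
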
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 }
 
 /**
- * spider_net_decode_descr - processes an rx descriptor
+ * spider_net_decode_one_descr - processes an rx descriptor
  * @card: card structure
+ * @napi: whether caller is in NAPI context
  *
  * returns 1 if a packet has been sent to the stack, otherwise 0
  *
  * processes an rx descriptor by iommu-unmapping the data buffer and passing
- * the packet up to the stack
+ * the packet up to the stack. This function is called in softirq
+ * context, i.e. either from the interrupt bottom half or from NAPI polling
  */
 static int
-spider_net_decode_one_descr(struct spider_net_card *card)
+spider_net_decode_one_descr(struct spider_net_card *card, int napi)
 {
        enum spider_net_descr_status status;
        struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 
        if (status == SPIDER_NET_DESCR_CARDOWNED) {
                /* nothing in the descriptor yet */
-               return 0;
+               result = 0;
+               goto out;
        }
 
        if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
-               /* not initialized yet, I bet chain->tail == chain->head
-                * and the ring is empty */
+               /* not initialized yet, the ring must be empty */
                spider_net_refill_rx_chain(card);
-               return 0;
+               spider_net_enable_rxdmac(card);
+               result = 0;
+               goto out;
        }
 
-       /* descriptor definitively used -- move on head */
+       /* descriptor definitively used -- advance the tail */
        chain->tail = descr->next;
 
        result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
                        pr_err("%s: dropping RX descriptor with state %d\n",
                               card->netdev->name, status);
                card->netdev_stats.rx_dropped++;
+               pci_unmap_single(card->pdev, descr->buf_addr,
+                                SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+               dev_kfree_skb_irq(descr->skb);
                goto refill;
        }
 
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
        }
 
        /* ok, we've got a packet in descr */
-       result = spider_net_pass_skb_up(descr, card);
+       result = spider_net_pass_skb_up(descr, card, napi);
 refill:
        spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
        /* change the descriptor state: */
-       spider_net_refill_rx_chain(card);
-
+       if (!napi)
+               spider_net_refill_rx_chain(card);
+out:
        return result;
 }
 
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
        packets_to_do = min(*budget, netdev->quota);
 
        while (packets_to_do) {
-               if (spider_net_decode_one_descr(card)) {
+               if (spider_net_decode_one_descr(card, 1)) {
                        packets_done++;
                        packets_to_do--;
                } else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 
        netdev->quota -= packets_done;
        *budget -= packets_done;
+       spider_net_refill_rx_chain(card);
 
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
@@ -1341,6 +1306,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
                             card->tx_chain.tail->bus_addr);
 }
 
+/**
+ * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
+ * @card: card structure
+ *
+ * spider_net_handle_rxram_full empties the RX ring so that spider can put
+ * more packets in it and empty its RX RAM. This is called in bottom half
+ * context
+ */
+static void
+spider_net_handle_rxram_full(struct spider_net_card *card)
+{
+       while (spider_net_decode_one_descr(card, 0))
+               ;
+       spider_net_enable_rxchtails(card);
+       spider_net_enable_rxdmac(card);
+       netif_rx_schedule(card->netdev);
+}
+
 /**
  * spider_net_handle_error_irq - handles errors raised by an interrupt
  * @card: card structure
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                                switch (i)
        {
        case SPIDER_NET_GTMFLLINT:
-               if (netif_msg_intr(card))
+               if (netif_msg_intr(card) && net_ratelimit())
                        pr_err("Spider TX RAM full\n");
                show_error = 0;
                break;
+       case SPIDER_NET_GRFDFLLINT: /* fallthrough */
+       case SPIDER_NET_GRFCFLLINT: /* fallthrough */
+       case SPIDER_NET_GRFBFLLINT: /* fallthrough */
+       case SPIDER_NET_GRFAFLLINT: /* fallthrough */
        case SPIDER_NET_GRMFLLINT:
-               if (netif_msg_intr(card))
+               if (netif_msg_intr(card) && net_ratelimit())
                        pr_err("Spider RX RAM full, incoming packets "
-                              "might be discarded !\n");
-               netif_rx_schedule(card->netdev);
-               spider_net_enable_rxchtails(card);
-               spider_net_enable_rxdmac(card);
+                              "might be discarded!\n");
+               spider_net_rx_irq_off(card);
+               tasklet_schedule(&card->rxram_full_tl);
+               show_error = 0;
                break;
 
        /* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                /* allrighty. tx from previous descr ok */
                show_error = 0;
                break;
-       /* case SPIDER_NET_GRFDFLLINT: print a message down there */
-       /* case SPIDER_NET_GRFCFLLINT: print a message down there */
-       /* case SPIDER_NET_GRFBFLLINT: print a message down there */
-       /* case SPIDER_NET_GRFAFLLINT: print a message down there */
 
        /* chain end */
        case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                               "restarting DMAC %c.\n",
                               'D'+i-SPIDER_NET_GDDDCEINT);
                spider_net_refill_rx_chain(card);
+               spider_net_enable_rxdmac(card);
                show_error = 0;
                break;
 
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_GDAINVDINT:
                /* could happen when rx chain is full */
                spider_net_refill_rx_chain(card);
+               spider_net_enable_rxdmac(card);
                show_error = 0;
                break;
 
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
        if (!status_reg)
                return IRQ_NONE;
 
-       if (status_reg & SPIDER_NET_TXINT)
-               spider_net_release_tx_chain(card, 0);
-
        if (status_reg & SPIDER_NET_RXINT ) {
                spider_net_rx_irq_off(card);
                netif_rx_schedule(netdev);
        }
 
-       /* we do this after rx and tx processing, as we want the tx chain
-        * processed to see, whether we should restart tx dma processing */
-       spider_net_handle_error_irq(card, status_reg);
+       if (status_reg & SPIDER_NET_ERRINT )
+               spider_net_handle_error_irq(card, status_reg);
 
        /* clear interrupt sources */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
 /**
  * spider_net_download_firmware - loads firmware into the adapter
  * @card: card structure
- * @firmware: firmware pointer
+ * @firmware_ptr: pointer to firmware data
  *
- * spider_net_download_firmware loads the firmware opened by
- * spider_net_init_firmware into the adapter.
+ * spider_net_download_firmware loads the firmware data into the
+ * adapter. It assumes the length etc. are correct.
  */
-static void
+static int
 spider_net_download_firmware(struct spider_net_card *card,
-                            const struct firmware *firmware)
+                            u8 *firmware_ptr)
 {
        int sequencer, i;
-       u32 *fw_ptr = (u32 *)firmware->data;
+       u32 *fw_ptr = (u32 *)firmware_ptr;
 
        /* stop sequencers */
        spider_net_write_reg(card, SPIDER_NET_GSINIT,
                             SPIDER_NET_STOP_SEQ_VALUE);
 
-       for (sequencer = 0; sequencer < 6; sequencer++) {
+       for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+            sequencer++) {
                spider_net_write_reg(card,
                                     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
-               for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+               for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
                        spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                             sequencer * 8, *fw_ptr);
                        fw_ptr++;
                }
        }
 
+       if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
+               return -EIO;
+
        spider_net_write_reg(card, SPIDER_NET_GSINIT,
                             SPIDER_NET_RUN_SEQ_VALUE);
+
+       return 0;
 }
 
 /**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
 static int
 spider_net_init_firmware(struct spider_net_card *card)
 {
-       const struct firmware *firmware;
-       int err = -EIO;
+       struct firmware *firmware = NULL;
+       struct device_node *dn;
+       u8 *fw_prop = NULL;
+       int err = -ENOENT;
+       int fw_size;
+
+       if (request_firmware((const struct firmware **)&firmware,
+                            SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
+               if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
+                    netif_msg_probe(card) ) {
+                       pr_err("Incorrect size of spidernet firmware in " \
+                              "filesystem. Looking in host firmware...\n");
+                       goto try_host_fw;
+               }
+               err = spider_net_download_firmware(card, firmware->data);
 
-       if (request_firmware(&firmware,
-                            SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
-               if (netif_msg_probe(card))
-                       pr_err("Couldn't read in sequencer data file %s.\n",
-                              SPIDER_NET_FIRMWARE_NAME);
-               firmware = NULL;
-               goto out;
-       }
+               release_firmware(firmware);
+               if (err)
+                       goto try_host_fw;
 
-       if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
-               if (netif_msg_probe(card))
-                       pr_err("Invalid size of sequencer data file %s.\n",
-                              SPIDER_NET_FIRMWARE_NAME);
-               goto out;
+               goto done;
        }
 
-       spider_net_download_firmware(card, firmware);
+try_host_fw:
+       dn = pci_device_to_OF_node(card->pdev);
+       if (!dn)
+               goto out_err;
 
-       err = 0;
-out:
-       release_firmware(firmware);
+       fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
+       if (!fw_prop)
+               goto out_err;
+
+       if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
+            netif_msg_probe(card) ) {
+               pr_err("Incorrect size of spidernet firmware in " \
+                      "host firmware\n");
+               goto done;
+       }
 
+       err = spider_net_download_firmware(card, fw_prop);
+
+done:
+       return err;
+out_err:
+       if (netif_msg_probe(card))
+               pr_err("Couldn't find spidernet firmware in filesystem " \
+                      "or host firmware\n");
        return err;
 }
 
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
                             SPIDER_NET_CKRCTRL_RUN_VALUE);
 
        /* empty sequencer data */
-       for (sequencer = 0; sequencer < 6; sequencer++) {
+       for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+            sequencer++) {
                spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                     sequencer * 8, 0x0);
-               for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+               for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
                        spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                             sequencer * 8, 0x0);
                }
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
        SET_NETDEV_DEV(netdev, &card->pdev->dev);
 
        pci_set_drvdata(card->pdev, netdev);
-       spin_lock_init(&card->intmask_lock);
+
+       atomic_set(&card->tx_chain_release, 0);
+       card->rxram_full_tl.data = (unsigned long) card;
+       card->rxram_full_tl.func =
+               (void (*)(unsigned long)) spider_net_handle_rxram_full;
+       init_timer(&card->tx_timer);
+       card->tx_timer.function =
+               (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
+       card->tx_timer.data = (unsigned long) card;
        netdev->irq = card->pdev->irq;
 
        card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
index 22b2f2347351a281e80723ea4b748d16940ee3b1..5922b529a04866991ed3dfeb875c46396e0962ee 100644 (file)
@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
 
 extern char spider_net_driver_name[];
 
-#define SPIDER_NET_MAX_MTU                     2308
+#define SPIDER_NET_MAX_FRAME                   2312
+#define SPIDER_NET_MAX_MTU                     2294
 #define SPIDER_NET_MIN_MTU                     64
 
 #define SPIDER_NET_RXBUF_ALIGN                 128
 
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT      64
+#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT      256
 #define SPIDER_NET_RX_DESCRIPTORS_MIN          16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX          256
+#define SPIDER_NET_RX_DESCRIPTORS_MAX          512
 
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT      64
+#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT      256
 #define SPIDER_NET_TX_DESCRIPTORS_MIN          16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX          256
+#define SPIDER_NET_TX_DESCRIPTORS_MAX          512
+
+#define SPIDER_NET_TX_TIMER                    20
 
 #define SPIDER_NET_RX_CSUM_DEFAULT             1
 
-#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ
-#define SPIDER_NET_NAPI_WEIGHT 64
+#define SPIDER_NET_WATCHDOG_TIMEOUT            50*HZ
+#define SPIDER_NET_NAPI_WEIGHT                 64
 
-#define SPIDER_NET_FIRMWARE_LEN                1024
+#define SPIDER_NET_FIRMWARE_SEQS       6
+#define SPIDER_NET_FIRMWARE_SEQWORDS   1024
+#define SPIDER_NET_FIRMWARE_LEN                (SPIDER_NET_FIRMWARE_SEQS * \
+                                        SPIDER_NET_FIRMWARE_SEQWORDS * \
+                                        sizeof(u32))
 #define SPIDER_NET_FIRMWARE_NAME       "spider_fw.bin"
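
SPIDER_NET_FIRMWARE_LEN is now derived from the sequencer count and the per-sequencer word count, so both the filesystem image and the device-tree "firmware" property are validated against the same byte length, 6 * 1024 * 4 = 24576 bytes. A trivial sketch of that size check; the helper name and messages are illustrative:

#include <stdio.h>
#include <stdint.h>

#define FIRMWARE_SEQS           6
#define FIRMWARE_SEQWORDS       1024
#define FIRMWARE_LEN            (FIRMWARE_SEQS * FIRMWARE_SEQWORDS * sizeof(uint32_t))

/* accept a firmware blob only when it is exactly the expected length */
static int firmware_size_ok(size_t size)
{
        if (size != FIRMWARE_LEN) {
                fprintf(stderr, "bad firmware size %zu (expected %zu)\n",
                        size, (size_t)FIRMWARE_LEN);
                return 0;
        }
        return 1;
}

int main(void)
{
        printf("expected length: %zu bytes\n", (size_t)FIRMWARE_LEN); /* 24576 */
        printf("24576 ok? %d\n", firmware_size_ok(24576));
        printf("24000 ok? %d\n", firmware_size_ok(24000));
        return 0;
}
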
 
 /** spider_net SMMIO registers */
@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
 /** SCONFIG registers */
 #define SPIDER_NET_SCONFIG_IOACTE      0x00002810
 
-/** hardcoded register values */
-#define SPIDER_NET_INT0_MASK_VALUE     0x3f7fe3ff
-#define SPIDER_NET_INT1_MASK_VALUE     0xffffffff
+/** interrupt mask registers */
+#define SPIDER_NET_INT0_MASK_VALUE     0x3f7fe2c7
+#define SPIDER_NET_INT1_MASK_VALUE     0xffff7ff7
 /* no MAC aborts -> auto retransmission */
-#define SPIDER_NET_INT2_MASK_VALUE     0xfffffff1
+#define SPIDER_NET_INT2_MASK_VALUE     0xffef7ff1
 
-/* clear counter when interrupt sources are cleared
-#define SPIDER_NET_FRAMENUM_VALUE      0x0001f001 */
 /* we rely on flagged descriptor interrupts */
 #define SPIDER_NET_FRAMENUM_VALUE      0x00000000
 /* set this first, then the FRAMENUM_VALUE */
@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
 #if 0
 #define SPIDER_NET_WOL_VALUE           0x00000000
 #endif
-#define SPIDER_NET_IPSECINIT_VALUE     0x00f000f8
+#define SPIDER_NET_IPSECINIT_VALUE     0x6f716f71
 
 /* pause frames: automatic, no upper retransmission count */
 /* outside loopback mode: ETOMOD signal dont matter, not connected */
@@ -318,6 +323,10 @@ enum spider_net_int2_status {
 #define SPIDER_NET_RXINT       ( (1 << SPIDER_NET_GDAFDCINT) | \
                                  (1 << SPIDER_NET_GRMFLLINT) )
 
+#define SPIDER_NET_ERRINT      ( 0xffffffff & \
+                                 (~SPIDER_NET_TXINT) & \
+                                 (~SPIDER_NET_RXINT) )
+
 #define SPIDER_NET_GPREXEC             0x80000000
 #define SPIDER_NET_GPRDAT_MASK         0x0000ffff
 
@@ -358,9 +367,6 @@ enum spider_net_int2_status {
 /* descr ready, descr is in middle of chain, get interrupt on completion */
 #define SPIDER_NET_DMAC_RX_CARDOWNED   0xa0800000
 
-/* multicast is no problem */
-#define SPIDER_NET_DATA_ERROR_MASK     0xffffbfff
-
 enum spider_net_descr_status {
        SPIDER_NET_DESCR_COMPLETE               = 0x00, /* used in rx and tx */
        SPIDER_NET_DESCR_RESPONSE_ERROR         = 0x01, /* used in rx and tx */
@@ -373,9 +379,9 @@ enum spider_net_descr_status {
 
 struct spider_net_descr {
        /* as defined by the hardware */
-       dma_addr_t buf_addr;
+       u32 buf_addr;
        u32 buf_size;
-       dma_addr_t next_descr_addr;
+       u32 next_descr_addr;
        u32 dmac_cmd_status;
        u32 result_size;
        u32 valid_size; /* all zeroes for tx */
@@ -384,7 +390,7 @@ struct spider_net_descr {
 
        /* used in the driver */
        struct sk_buff *skb;
-       dma_addr_t bus_addr;
+       u32 bus_addr;
        struct spider_net_descr *next;
        struct spider_net_descr *prev;
 } __attribute__((aligned(32)));
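The switch from dma_addr_t to u32 for the descriptor fields above presumably keeps the structure layout fixed at what the Spider DMA engine expects: the hardware reads 32-bit descriptor words, whereas dma_addr_t can be 64 bits wide on 64-bit platforms, which would change the size and field offsets of the shared structure.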
@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
 };
 
 /* descriptor data_status bits */
-#define SPIDER_NET_RXIPCHK             29
-#define SPIDER_NET_TCPUDPIPCHK         28
-#define SPIDER_NET_DATA_STATUS_CHK_MASK        (1 << SPIDER_NET_RXIPCHK | \
-                                        1 << SPIDER_NET_TCPUDPIPCHK)
-
+#define SPIDER_NET_RX_IPCHK            29
+#define SPIDER_NET_RX_TCPCHK           28
 #define SPIDER_NET_VLAN_PACKET         21
+#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
+                                         (1 << SPIDER_NET_RX_TCPCHK) )
 
 /* descriptor data_error bits */
-#define SPIDER_NET_RXIPCHKERR          27
-#define SPIDER_NET_RXTCPCHKERR         26
-#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
-                                        1 << SPIDER_NET_RXTCPCHKERR)
+#define SPIDER_NET_RX_IPCHKERR         27
+#define SPIDER_NET_RX_RXTCPCHKERR      28
+
+#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
 
-/* the cases we don't pass the packet to the stack */
-#define SPIDER_NET_DESTROY_RX_FLAGS    0x70138000
+/* the cases we don't pass the packet to the stack.
+ * 701b8000 would be correct, but every packet gets that flag */
+#define SPIDER_NET_DESTROY_RX_FLAGS    0x700b8000
 
 #define SPIDER_NET_DESCR_SIZE          32
 
@@ -445,13 +451,16 @@ struct spider_net_card {
 
        struct spider_net_descr_chain tx_chain;
        struct spider_net_descr_chain rx_chain;
-       spinlock_t chain_lock;
+       atomic_t rx_chain_refill;
+       atomic_t tx_chain_release;
 
        struct net_device_stats netdev_stats;
 
        struct spider_net_options options;
 
        spinlock_t intmask_lock;
+       struct tasklet_struct rxram_full_tl;
+       struct timer_list tx_timer;
 
        struct work_struct tx_timeout_task;
        atomic_t tx_timeout_task_counter;
index d42e60ba74ceb3013b2b80170aeb9ceede60c352..a5bb0b7633af2576db48f9a347962b9394506db5 100644 (file)
@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
        return 0;
 }
 
+static uint32_t
+spider_net_ethtool_get_tx_csum(struct net_device *netdev)
+{
+        return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int
+spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+        if (data)
+                netdev->features |= NETIF_F_HW_CSUM;
+        else
+                netdev->features &= ~NETIF_F_HW_CSUM;
+
+        return 0;
+}
+
 struct ethtool_ops spider_net_ethtool_ops = {
        .get_settings           = spider_net_ethtool_get_settings,
        .get_drvinfo            = spider_net_ethtool_get_drvinfo,
@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
        .nway_reset             = spider_net_ethtool_nway_reset,
        .get_rx_csum            = spider_net_ethtool_get_rx_csum,
        .set_rx_csum            = spider_net_ethtool_set_rx_csum,
+       .get_tx_csum            = spider_net_ethtool_get_tx_csum,
+       .set_tx_csum            = spider_net_ethtool_set_tx_csum,
 };
 
index eb86b059809b6ac4242bbf91d16d258e7e9a7ab5..f2d1dafde08773ad74107309b5e74ed9a0b60f16 100644 (file)
@@ -69,8 +69,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.47"
-#define DRV_MODULE_RELDATE     "Dec 28, 2005"
+#define DRV_MODULE_VERSION     "3.48"
+#define DRV_MODULE_RELDATE     "Jan 16, 2006"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
-                       tg3_nvram_lock(tp);
+                       int err;
+
+                       err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
-                       tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
-                       tg3_nvram_unlock(tp);
+                       if (!err)
+                               tg3_nvram_unlock(tp);
                }
        }
 
@@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
                int i;
 
-               tw32(NVRAM_SWARB, SWARB_REQ_SET1);
-               for (i = 0; i < 8000; i++) {
-                       if (tr32(NVRAM_SWARB) & SWARB_GNT1)
-                               break;
-                       udelay(20);
+               if (tp->nvram_lock_cnt == 0) {
+                       tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+                       for (i = 0; i < 8000; i++) {
+                               if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+                                       break;
+                               udelay(20);
+                       }
+                       if (i == 8000) {
+                               tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+                               return -ENODEV;
+                       }
                }
-               if (i == 8000)
-                       return -ENODEV;
+               tp->nvram_lock_cnt++;
        }
        return 0;
 }
@@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp)
 /* tp->lock is held. */
 static void tg3_nvram_unlock(struct tg3 *tp)
 {
-       if (tp->tg3_flags & TG3_FLAG_NVRAM)
-               tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+       if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+               if (tp->nvram_lock_cnt > 0)
+                       tp->nvram_lock_cnt--;
+               if (tp->nvram_lock_cnt == 0)
+                       tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+       }
 }
 
 /* tp->lock is held. */
@@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        void (*write_op)(struct tg3 *, u32, u32);
        int i;
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
                tg3_nvram_lock(tp);
+               /* No matching tg3_nvram_unlock() after this because
+                * chip reset below will undo the nvram lock.
+                */
+               tp->nvram_lock_cnt = 0;
+       }
 
        /*
         * We must avoid the readl() that normally takes place.
@@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }
+
+       /* Clear firmware's nvram arbitration. */
+       if (tp->tg3_flags & TG3_FLAG_NVRAM)
+               tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
 }
 
@@ -4736,7 +4756,7 @@ struct fw_info {
 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
                                 int cpu_scratch_size, struct fw_info *info)
 {
-       int err, i;
+       int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);
 
        if (cpu_base == TX_CPU_BASE &&
@@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock first before halting the cpu.
         */
-       tg3_nvram_lock(tp);
+       lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
-       tg3_nvram_unlock(tp);
+       if (!lock_err)
+               tg3_nvram_unlock(tp);
        if (err)
                goto out;
 
@@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
-               int irq_sync = 0;
+               int err, irq_sync = 0;
 
                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
@@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                tg3_full_lock(tp, irq_sync);
 
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
-               tg3_nvram_lock(tp);
+               err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
-               tg3_nvram_unlock(tp);
+               if (!err)
+                       tg3_nvram_unlock(tp);
 
                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
@@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                tp->tg3_flags |= TG3_FLAG_NVRAM;
 
-               tg3_nvram_lock(tp);
+               if (tg3_nvram_lock(tp)) {
+                       printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
+                              "tg3_nvram_init failed.\n", tp->dev->name);
+                       return;
+               }
                tg3_enable_nvram_access(tp);
 
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;
 
-       tg3_nvram_lock(tp);
+       ret = tg3_nvram_lock(tp);
+       if (ret)
+               return ret;
 
        tg3_enable_nvram_access(tp);
 
@@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 
                offset = offset + (pagesize - page_off);
 
-               /* Nvram lock released by tg3_nvram_read() above,
-                * so need to get it again.
-                */
-               tg3_nvram_lock(tp);
                tg3_enable_nvram_access(tp);
 
                /*
@@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
        else {
                u32 grc_mode;
 
-               tg3_nvram_lock(tp);
+               ret = tg3_nvram_lock(tp);
+               if (ret)
+                       return ret;
 
                tg3_enable_nvram_access(tp);
                if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
index 890e1635996b56294fdaca69a9bb37e78ea7ce94..e8243305f0e807a9433ac9e0762dc5266ff116aa 100644 (file)
@@ -2275,6 +2275,7 @@ struct tg3 {
        dma_addr_t                      stats_mapping;
        struct work_struct              reset_task;
 
+       int                             nvram_lock_cnt;
        u32                             nvram_size;
        u32                             nvram_pagesize;
        u32                             nvram_jedecnum;
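
Taken together, the tg3.c hunks above and this new tg3.h field turn the NVRAM arbitration into a counted lock: only the outermost tg3_nvram_lock() call requests the SWARB hardware arbiter, only the matching outermost unlock releases it, and callers now skip the unlock when their lock attempt returned an error. A minimal standalone sketch of the counting pattern (the names and printf() calls are illustrative only, not from the driver):

	#include <stdio.h>

	/* Models the nvram_lock_cnt refcounting added above; the real driver
	 * holds tp->lock and talks to the SWARB register instead of printing. */
	struct nvram_ctx {
		int lock_cnt;		/* plays the role of tp->nvram_lock_cnt */
	};

	static int nvram_lock(struct nvram_ctx *ctx)
	{
		if (ctx->lock_cnt == 0)
			printf("request hardware arbiter (SWARB_REQ_SET1)\n");
		ctx->lock_cnt++;
		return 0;
	}

	static void nvram_unlock(struct nvram_ctx *ctx)
	{
		if (ctx->lock_cnt > 0)
			ctx->lock_cnt--;
		if (ctx->lock_cnt == 0)
			printf("release hardware arbiter (SWARB_REQ_CLR1)\n");
	}

	int main(void)
	{
		struct nvram_ctx ctx = { 0 };

		nvram_lock(&ctx);	/* outer caller, e.g. a write path */
		nvram_lock(&ctx);	/* nested caller, e.g. a read helper */
		nvram_unlock(&ctx);	/* inner unlock: arbiter stays held */
		nvram_unlock(&ctx);	/* outer unlock: arbiter released once */
		return 0;
	}

The nesting shown in main() is exactly the situation the removed re-lock comment in tg3_nvram_write_block_unbuffered() used to work around.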
index ee866fd6957de2cf9ed0f3b75a9ca37971c4534b..a4c7ae94614d0b0dd9a3ab0a1de9332b1ba36405 100644 (file)
@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
                int channel = fwrq->m;
                /* We should do a better check than that,
                 * based on the card capability !!! */
-               if((channel < 1) || (channel > 16)) {
+               if((channel < 1) || (channel > 14)) {
                        printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
                        rc = -EINVAL;
                } else {
                        readConfigRid(local, 1);
                        /* Yes ! We can set it !!! */
-                       local->config.channelSet = (u16)(channel - 1);
+                       local->config.channelSet = (u16) channel;
                        set_bit (FLAG_COMMIT, &local->flags);
                }
        }
@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
 {
        struct airo_info *local = dev->priv;
        StatusRid status_rid;           /* Card status info */
+       int ch;
 
        readConfigRid(local, 1);
        if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
        else
                readStatusRid(local, &status_rid, 1);
 
-#ifdef WEXT_USECHANNELS
-       fwrq->m = ((int)status_rid.channel) + 1;
-       fwrq->e = 0;
-#else
-       {
-               int f = (int)status_rid.channel;
-               fwrq->m = frequency_list[f] * 100000;
+       ch = (int)status_rid.channel;
+       if((ch > 0) && (ch < 15)) {
+               fwrq->m = frequency_list[ch - 1] * 100000;
                fwrq->e = 1;
+       } else {
+               fwrq->m = ch;
+               fwrq->e = 0;
        }
-#endif
 
        return 0;
 }
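With this change a card reporting, say, channel 1 (assuming airo's frequency_list[] holds the 2.4 GHz channel frequencies in MHz, like the hostap table added further below) is returned as fwrq->m = 2412 * 100000 = 241200000 with fwrq->e = 1, which Wireless Extensions decodes as m x 10^e = 2.412 GHz; channel values outside 1-14 fall back to reporting the raw channel number with e = 0.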
@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
        /* If none, we may want to get the one that was set */
 
        /* Push it out ! */
-       dwrq->length = status_rid.SSIDlen + 1;
+       dwrq->length = status_rid.SSIDlen;
        dwrq->flags = 1; /* active */
 
        return 0;
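This essid hunk, and the matching ones in atmel.c, isl_ioctl.c (prism54), ray_cs.c and wavelan_cs.c further down, all make the same correction: the length reported through the iw_point now counts only the SSID bytes, not the terminating NUL that the drivers still append to the buffer, so a five-character SSID such as "linux" is reported with length 5 rather than 6.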
index f0ccfef664459fced62da435aec49d65da8dd11a..98a76f10a0f71f29f09ef031b000659dbbccb09c 100644 (file)
@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
        if (priv->new_SSID_size != 0) {
                memcpy(extra, priv->new_SSID, priv->new_SSID_size);
                extra[priv->new_SSID_size] = '\0';
-               dwrq->length = priv->new_SSID_size + 1;
+               dwrq->length = priv->new_SSID_size;
        } else {
                memcpy(extra, priv->SSID, priv->SSID_size);
                extra[priv->SSID_size] = '\0';
-               dwrq->length = priv->SSID_size + 1;
+               dwrq->length = priv->SSID_size;
        }
 
        dwrq->flags = !priv->connect_to_any_BSS; /* active */
index 56f41c714d3808e71ba53661f7c883131c46301b..c8f6286dd35fd29e514654a69be5067fc0aca469 100644 (file)
@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
        depends on HOSTAP
        ---help---
        Configure Host AP driver to include support for firmware image
-       download. Current version supports only downloading to volatile, i.e.,
-       RAM memory. Flash upgrade is not yet supported.
+       download. This option by itself only enables downloading to the
+       volatile memory, i.e. the card RAM. This option is required to
+       support cards that don't have firmware in flash, such as D-Link
+       DWL-520 rev E and D-Link DWL-650 rev P.
 
-       Firmware image downloading needs user space tool, prism2_srec. It is
-       available from http://hostap.epitest.fi/.
+       Firmware image downloading needs a user space tool, prism2_srec.
+       It is available from http://hostap.epitest.fi/.
+
+config HOSTAP_FIRMWARE_NVRAM
+       bool "Support for non-volatile firmware download"
+       depends on HOSTAP_FIRMWARE
+       ---help---
+       Allow Host AP driver to write firmware images to the non-volatile
+       card memory, i.e. flash memory that survives power cycling.
+       Enable this option if you want to be able to change card firmware
+       permanently.
+
+       Firmware image downloading needs a user space tool, prism2_srec.
+       It is available from http://hostap.epitest.fi/.
 
 config HOSTAP_PLX
        tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
index 353ccb93134b79e70e38bb4a349cd681018c9576..b8e41a702c00ba5619edd864dbe35a9f5651ceb0 100644 (file)
@@ -1,4 +1,5 @@
-hostap-y := hostap_main.o
+hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
+            hostap_ioctl.o hostap_main.o hostap_proc.o 
 obj-$(CONFIG_HOSTAP) += hostap.o
 
 obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
index 5fac89b8ce3a036459b1a75d2e721c54d855e9a4..5e63765219fe91fa0bf08204d4a28f8096339f28 100644 (file)
@@ -1,6 +1,15 @@
 #ifndef HOSTAP_H
 #define HOSTAP_H
 
+#include <linux/ethtool.h>
+
+#include "hostap_wlan.h"
+#include "hostap_ap.h"
+
+static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+                                 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
+
 /* hostap.c */
 
 extern struct proc_dir_entry *hostap_proc;
@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
 int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
                         u8 *body, size_t bodylen);
 int prism2_sta_deauth(local_info_t *local, u16 reason);
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked);
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked, int do_not_remove);
+
+
+/* hostap_ap.c */
+
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
+void ap_control_kickall(struct ap_data *ap);
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+                        struct ieee80211_crypt_data ***crypt);
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+                          struct iw_quality qual[], int buf_size,
+                          int aplist);
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
 
 
 /* hostap_proc.c */
@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
 void hostap_info_process(local_info_t *local, struct sk_buff *skb);
 
 
+/* hostap_ioctl.c */
+
+extern const struct iw_handler_def hostap_iw_handler_def;
+extern struct ethtool_ops prism2_ethtool_ops;
+
+int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+
 #endif /* HOSTAP_H */
index bf506f50d72295a900402d51d3bb4747c64abaee..1fc72fe511e9ddfb00a1c53a528f1d93edc49d5c 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef HOSTAP_80211_H
 #define HOSTAP_80211_H
 
+#include <linux/types.h>
+#include <net/ieee80211_crypt.h>
+
 struct hostap_ieee80211_mgmt {
        u16 frame_control;
        u16 duration;
index 4b13b76425c1c0642c47c9ce77d3bd567fd425e2..7e04dc94b3bc4e9d8eaeb674b6925b8c9fa05abf 100644 (file)
@@ -1,7 +1,18 @@
 #include <linux/etherdevice.h>
+#include <net/ieee80211_crypt.h>
 
 #include "hostap_80211.h"
 #include "hostap.h"
+#include "hostap_ap.h"
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
 
 void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
                          struct hostap_80211_rx_status *rx_stats)
index 9d24f8a38ac525843772b41143db514a9f686ecc..4a85e63906f1554b9302c452faea74cb86f4a557 100644 (file)
@@ -1,3 +1,18 @@
+#include "hostap_80211.h"
+#include "hostap_common.h"
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
+
 void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
 {
        struct ieee80211_hdr_4addr *hdr;
index 9da94ab7f05f87e24eec2816ca5094e894906d01..753a1de6664bba6f8a90972a3ba13be751f20ec6 100644 (file)
  *   (8802.11: 5.5)
  */
 
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
 static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
                                                 DEF_INTS };
 module_param_array(other_ap_policy, int, NULL, 0444);
@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
 }
 
 
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac)
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 {
        struct mac_entry *entry;
 
@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
 }
 
 
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac)
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 {
        struct list_head *ptr;
        struct mac_entry *entry;
@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
 }
 
 
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
 {
        struct list_head *ptr, *n;
        struct mac_entry *entry;
@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
 }
 
 
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
-                              u8 *mac)
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
 {
        struct sta_info *sta;
        u16 resp;
@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
 
-static void ap_control_kickall(struct ap_data *ap)
+void ap_control_kickall(struct ap_data *ap)
 {
        struct list_head *ptr, *n;
        struct sta_info *sta;
@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
 }
 
 
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
-                                 struct iw_quality qual[], int buf_size,
-                                 int aplist)
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+                          struct iw_quality qual[], int buf_size,
+                          int aplist)
 {
        struct ap_data *ap = local->ap;
        struct list_head *ptr;
@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
 
 /* Translate our list of Access Points & Stations to a card independent
  * format that the Wireless Tools will understand - Jean II */
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
 {
        struct hostap_interface *iface;
        local_info_t *local;
@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
 }
 
 
-static int prism2_hostapd(struct ap_data *ap,
-                         struct prism2_hostapd_param *param)
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
 {
        switch (param->cmd) {
        case PRISM2_HOSTAPD_FLUSH:
@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
 }
 
 
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
-                               struct ieee80211_crypt_data ***crypt)
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+                        struct ieee80211_crypt_data ***crypt)
 {
        struct sta_info *sta;
 
index 6d00df69c2e3e9f57f28af33a8af725aebb8bae9..2fa2452b6b07de6d6c06f76b2e4b0742c90e7dc2 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef HOSTAP_AP_H
 #define HOSTAP_AP_H
 
+#include "hostap_80211.h"
+
 /* AP data structures for STAs */
 
 /* maximum number of frames to buffer per STA */
index 6f4fa9dc308f7e55b6c85006833b7a5e13416cb9..01624005d808f541ae30f762ab5e4dad1785ff8e 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef HOSTAP_COMMON_H
 #define HOSTAP_COMMON_H
 
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
 #define BIT(x) (1 << (x))
 
 #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
index 7ed3425d08c14a15f4a0c2802d90ec24ceef41a0..c090a5aebb58d55f108da5fbe83a92ea9e64e5b6 100644 (file)
 #define PRISM2_DOWNLOAD_SUPPORT
 #endif
 
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-/* Allow writing firmware images into flash, i.e., to non-volatile storage.
- * Before you enable this option, you should make absolutely sure that you are
- * using prism2_srec utility that comes with THIS version of the driver!
- * In addition, please note that it is possible to kill your card with
- * non-volatile download if you are using incorrect image. This feature has not
- * been fully tested, so please be careful with it. */
-/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
+/* Allow kernel configuration to enable non-volatile download support. */
+#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
+#define PRISM2_NON_VOLATILE_DOWNLOAD
+#endif
 
 /* Save low-level I/O for debugging. This should not be enabled in normal use.
  */
index 5aa998fdf1c48d276a72cd2b8ea2fc3297218b28..50f72d831cf40a8e89ffc37bbf96066825aa3484 100644 (file)
@@ -1,5 +1,8 @@
 /* Host AP driver Info Frame processing (part of hostap.o module) */
 
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
 
 /* Called only as a tasklet (software IRQ) */
 static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
index 2617d70bcda945814a96fbdf7fde59f79930cdb1..f3e0ce1ee037937a0c950e3b0c59b58b00b4d4bc 100644 (file)
@@ -1,11 +1,13 @@
 /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
 
-#ifdef in_atomic
-/* Get kernel_locked() for in_atomic() */
+#include <linux/types.h>
 #include <linux/smp_lock.h>
-#endif
 #include <linux/ethtool.h>
+#include <net/ieee80211_crypt.h>
 
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
 
 static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
 {
@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
                 local->sta_fw_ver & 0xff);
 }
 
-static struct ethtool_ops prism2_ethtool_ops = {
+struct ethtool_ops prism2_ethtool_ops = {
        .get_drvinfo = prism2_get_drvinfo
 };
 
@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
        (iw_handler) prism2_ioctl_priv_readmif,         /* 3 */
 };
 
-static const struct iw_handler_def hostap_iw_handler_def =
+const struct iw_handler_def hostap_iw_handler_def =
 {
        .num_standard   = sizeof(prism2_handler) / sizeof(iw_handler),
        .num_private    = sizeof(prism2_private_handler) / sizeof(iw_handler),
index 3d2ea61033be52fd5d0b047ba34de1d2abf20d7d..8dd4c4446a640bf6b3b14f89e054f62ef47a3ec9 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kmod.h>
 #include <linux/rtnetlink.h>
 #include <linux/wireless.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
 #include <net/ieee80211_crypt.h>
@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
 #define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
 
 
-/* hostap.c */
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked);
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked, int do_not_remove);
-
-/* hostap_ap.c */
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
-                                 struct iw_quality qual[], int buf_size,
-                                 int aplist);
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
-static int prism2_hostapd(struct ap_data *ap,
-                         struct prism2_hostapd_param *param);
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
-                               struct ieee80211_crypt_data ***crypt);
-static void ap_control_kickall(struct ap_data *ap);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac);
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac);
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
-                              u8 *mac);
-#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
-                                 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
-#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
-
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-
-/* FIX: these could be compiled separately and linked together to hostap.o */
-#include "hostap_ap.c"
-#include "hostap_info.c"
-#include "hostap_ioctl.c"
-#include "hostap_proc.c"
-#include "hostap_80211_rx.c"
-#include "hostap_80211_tx.c"
-
-
 struct net_device * hostap_add_interface(struct local_info *local,
                                         int type, int rtnl_locked,
                                         const char *prefix,
@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
 }
 
 
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked)
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked)
 {
        struct net_device *dev;
        struct list_head *ptr;
@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
 }
 
 
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked, int do_not_remove)
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked, int do_not_remove)
 {
        unsigned long flags;
        struct list_head *ptr;
index a0a4cbd4937a182676939c336a8f57b60ade7c93..d1d8ce022e63af6d3060589e504adc3765df085d 100644 (file)
@@ -1,5 +1,12 @@
 /* /proc routines for Host AP driver */
 
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <net/ieee80211_crypt.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+
 #define PROC_LIMIT (PAGE_SIZE - 80)
 
 
index cfd8015594921a849dd00215d4190ca381b8cd91..87a54aa6f4dd2a1e2c1bd6cdbf393cb9f7aa4411 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef HOSTAP_WLAN_H
 #define HOSTAP_WLAN_H
 
+#include <linux/wireless.h>
+#include <linux/netdevice.h>
+#include <net/iw_handler.h>
+
 #include "hostap_config.h"
 #include "hostap_common.h"
 
index 7518384f34d964350796f41af2c0577272f4eac5..8bf02763b5c72fc5ae116b113835cac1b8544c7d 100644 (file)
@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
        return &priv->ieee->stats;
 }
 
-#if WIRELESS_EXT < 18
-/* Support for wpa_supplicant before WE-18, deprecated. */
-
-/* following definitions must match definitions in driver_ipw.c */
-
-#define IPW2100_IOCTL_WPA_SUPPLICANT           SIOCIWFIRSTPRIV+30
-
-#define IPW2100_CMD_SET_WPA_PARAM              1
-#define        IPW2100_CMD_SET_WPA_IE                  2
-#define IPW2100_CMD_SET_ENCRYPTION             3
-#define IPW2100_CMD_MLME                       4
-
-#define IPW2100_PARAM_WPA_ENABLED              1
-#define IPW2100_PARAM_TKIP_COUNTERMEASURES     2
-#define IPW2100_PARAM_DROP_UNENCRYPTED         3
-#define IPW2100_PARAM_PRIVACY_INVOKED          4
-#define IPW2100_PARAM_AUTH_ALGS                        5
-#define IPW2100_PARAM_IEEE_802_1X              6
-
-#define IPW2100_MLME_STA_DEAUTH                        1
-#define IPW2100_MLME_STA_DISASSOC              2
-
-#define IPW2100_CRYPT_ERR_UNKNOWN_ALG          2
-#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR         3
-#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED    4
-#define IPW2100_CRYPT_ERR_KEY_SET_FAILED       5
-#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED    6
-#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED     7
-
-#define        IPW2100_CRYPT_ALG_NAME_LEN              16
-
-struct ipw2100_param {
-       u32 cmd;
-       u8 sta_addr[ETH_ALEN];
-       union {
-               struct {
-                       u8 name;
-                       u32 value;
-               } wpa_param;
-               struct {
-                       u32 len;
-                       u8 reserved[32];
-                       u8 data[0];
-               } wpa_ie;
-               struct {
-                       u32 command;
-                       u32 reason_code;
-               } mlme;
-               struct {
-                       u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
-                       u8 set_tx;
-                       u32 err;
-                       u8 idx;
-                       u8 seq[8];      /* sequence counter (set: RX, get: TX) */
-                       u16 key_len;
-                       u8 key[0];
-               } crypt;
-
-       } u;
-};
-
-/* end of driver_ipw.c code */
-#endif                         /* WIRELESS_EXT < 18 */
-
 static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
 {
        /* This is called when wpa_supplicant loads and closes the driver
@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
        return 0;
 }
 
-#if WIRELESS_EXT < 18
-#define IW_AUTH_ALG_OPEN_SYSTEM                        0x1
-#define IW_AUTH_ALG_SHARED_KEY                 0x2
-#endif
-
 static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
 {
 
@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
        ipw2100_set_wpa_ie(priv, &frame, 0);
 }
 
-#if WIRELESS_EXT < 18
-static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
-{
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       struct ieee80211_crypt_data *crypt;
-       unsigned long flags;
-       int ret = 0;
-
-       switch (name) {
-       case IPW2100_PARAM_WPA_ENABLED:
-               ret = ipw2100_wpa_enable(priv, value);
-               break;
-
-       case IPW2100_PARAM_TKIP_COUNTERMEASURES:
-               crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
-               if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
-                       break;
-
-               flags = crypt->ops->get_flags(crypt->priv);
-
-               if (value)
-                       flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-               else
-                       flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-
-               crypt->ops->set_flags(flags, crypt->priv);
-
-               break;
-
-       case IPW2100_PARAM_DROP_UNENCRYPTED:{
-                       /* See IW_AUTH_DROP_UNENCRYPTED handling for details */
-                       struct ieee80211_security sec = {
-                               .flags = SEC_ENABLED,
-                               .enabled = value,
-                       };
-                       priv->ieee->drop_unencrypted = value;
-                       /* We only change SEC_LEVEL for open mode. Others
-                        * are set by ipw_wpa_set_encryption.
-                        */
-                       if (!value) {
-                               sec.flags |= SEC_LEVEL;
-                               sec.level = SEC_LEVEL_0;
-                       } else {
-                               sec.flags |= SEC_LEVEL;
-                               sec.level = SEC_LEVEL_1;
-                       }
-                       if (priv->ieee->set_security)
-                               priv->ieee->set_security(priv->ieee->dev, &sec);
-                       break;
-               }
-
-       case IPW2100_PARAM_PRIVACY_INVOKED:
-               priv->ieee->privacy_invoked = value;
-               break;
-
-       case IPW2100_PARAM_AUTH_ALGS:
-               ret = ipw2100_wpa_set_auth_algs(priv, value);
-               break;
-
-       case IPW2100_PARAM_IEEE_802_1X:
-               priv->ieee->ieee802_1x = value;
-               break;
-
-       default:
-               printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
-                      dev->name, name);
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
-{
-
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       int ret = 0;
-
-       switch (command) {
-       case IPW2100_MLME_STA_DEAUTH:
-               // silently ignore
-               break;
-
-       case IPW2100_MLME_STA_DISASSOC:
-               ipw2100_disassociate_bssid(priv);
-               break;
-
-       default:
-               printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
-                      dev->name, command);
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
-                                 struct ipw2100_param *param, int plen)
-{
-
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       struct ieee80211_device *ieee = priv->ieee;
-       u8 *buf;
-
-       if (!ieee->wpa_enabled)
-               return -EOPNOTSUPP;
-
-       if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
-           (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
-               return -EINVAL;
-
-       if (param->u.wpa_ie.len) {
-               buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
-               if (buf == NULL)
-                       return -ENOMEM;
-
-               memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
-
-               kfree(ieee->wpa_ie);
-               ieee->wpa_ie = buf;
-               ieee->wpa_ie_len = param->u.wpa_ie.len;
-
-       } else {
-               kfree(ieee->wpa_ie);
-               ieee->wpa_ie = NULL;
-               ieee->wpa_ie_len = 0;
-       }
-
-       ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
-
-       return 0;
-}
-
-/* implementation borrowed from hostap driver */
-
-static int ipw2100_wpa_set_encryption(struct net_device *dev,
-                                     struct ipw2100_param *param,
-                                     int param_len)
-{
-       int ret = 0;
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       struct ieee80211_device *ieee = priv->ieee;
-       struct ieee80211_crypto_ops *ops;
-       struct ieee80211_crypt_data **crypt;
-
-       struct ieee80211_security sec = {
-               .flags = 0,
-       };
-
-       param->u.crypt.err = 0;
-       param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
-       if (param_len !=
-           (int)((char *)param->u.crypt.key - (char *)param) +
-           param->u.crypt.key_len) {
-               IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
-                              param->u.crypt.key_len);
-               return -EINVAL;
-       }
-       if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-           param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-           param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
-               if (param->u.crypt.idx >= WEP_KEYS)
-                       return -EINVAL;
-               crypt = &ieee->crypt[param->u.crypt.idx];
-       } else {
-               return -EINVAL;
-       }
-
-       sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
-       if (strcmp(param->u.crypt.alg, "none") == 0) {
-               if (crypt) {
-                       sec.enabled = 0;
-                       sec.encrypt = 0;
-                       sec.level = SEC_LEVEL_0;
-                       sec.flags |= SEC_LEVEL;
-                       ieee80211_crypt_delayed_deinit(ieee, crypt);
-               }
-               goto done;
-       }
-       sec.enabled = 1;
-       sec.encrypt = 1;
-
-       ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
-               request_module("ieee80211_crypt_wep");
-               ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
-               request_module("ieee80211_crypt_tkip");
-               ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
-               request_module("ieee80211_crypt_ccmp");
-               ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       }
-       if (ops == NULL) {
-               IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
-                              dev->name, param->u.crypt.alg);
-               param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
-               ret = -EINVAL;
-               goto done;
-       }
-
-       if (*crypt == NULL || (*crypt)->ops != ops) {
-               struct ieee80211_crypt_data *new_crypt;
-
-               ieee80211_crypt_delayed_deinit(ieee, crypt);
-
-               new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
-               if (new_crypt == NULL) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
-               new_crypt->ops = ops;
-               if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
-                       new_crypt->priv =
-                           new_crypt->ops->init(param->u.crypt.idx);
-
-               if (new_crypt->priv == NULL) {
-                       kfree(new_crypt);
-                       param->u.crypt.err =
-                           IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
-                       ret = -EINVAL;
-                       goto done;
-               }
-
-               *crypt = new_crypt;
-       }
-
-       if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
-           (*crypt)->ops->set_key(param->u.crypt.key,
-                                  param->u.crypt.key_len, param->u.crypt.seq,
-                                  (*crypt)->priv) < 0) {
-               IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
-               param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
-               ret = -EINVAL;
-               goto done;
-       }
-
-       if (param->u.crypt.set_tx) {
-               ieee->tx_keyidx = param->u.crypt.idx;
-               sec.active_key = param->u.crypt.idx;
-               sec.flags |= SEC_ACTIVE_KEY;
-       }
-
-       if (ops->name != NULL) {
-
-               if (strcmp(ops->name, "WEP") == 0) {
-                       memcpy(sec.keys[param->u.crypt.idx],
-                              param->u.crypt.key, param->u.crypt.key_len);
-                       sec.key_sizes[param->u.crypt.idx] =
-                           param->u.crypt.key_len;
-                       sec.flags |= (1 << param->u.crypt.idx);
-                       sec.flags |= SEC_LEVEL;
-                       sec.level = SEC_LEVEL_1;
-               } else if (strcmp(ops->name, "TKIP") == 0) {
-                       sec.flags |= SEC_LEVEL;
-                       sec.level = SEC_LEVEL_2;
-               } else if (strcmp(ops->name, "CCMP") == 0) {
-                       sec.flags |= SEC_LEVEL;
-                       sec.level = SEC_LEVEL_3;
-               }
-       }
-      done:
-       if (ieee->set_security)
-               ieee->set_security(ieee->dev, &sec);
-
-       /* Do not reset port if card is in Managed mode since resetting will
-        * generate new IEEE 802.11 authentication which may end up in looping
-        * with IEEE 802.1X.  If your hardware requires a reset after WEP
-        * configuration (for example... Prism2), implement the reset_port in
-        * the callbacks structures used to initialize the 802.11 stack. */
-       if (ieee->reset_on_keychange &&
-           ieee->iw_mode != IW_MODE_INFRA &&
-           ieee->reset_port && ieee->reset_port(dev)) {
-               IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
-               param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
-{
-
-       struct ipw2100_param *param;
-       int ret = 0;
-
-       IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
-
-       if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
-               return -EINVAL;
-
-       param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
-       if (param == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(param, p->pointer, p->length)) {
-               kfree(param);
-               return -EFAULT;
-       }
-
-       switch (param->cmd) {
-
-       case IPW2100_CMD_SET_WPA_PARAM:
-               ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
-                                           param->u.wpa_param.value);
-               break;
-
-       case IPW2100_CMD_SET_WPA_IE:
-               ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
-               break;
-
-       case IPW2100_CMD_SET_ENCRYPTION:
-               ret = ipw2100_wpa_set_encryption(dev, param, p->length);
-               break;
-
-       case IPW2100_CMD_MLME:
-               ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
-                                      param->u.mlme.reason_code);
-               break;
-
-       default:
-               printk(KERN_ERR DRV_NAME
-                      ": %s: Unknown WPA supplicant request: %d\n", dev->name,
-                      param->cmd);
-               ret = -EOPNOTSUPP;
-
-       }
-
-       if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-               ret = -EFAULT;
-
-       kfree(param);
-       return ret;
-}
-
-static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-       struct iwreq *wrq = (struct iwreq *)rq;
-       int ret = -1;
-       switch (cmd) {
-       case IPW2100_IOCTL_WPA_SUPPLICANT:
-               ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
-               return ret;
-
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       return -EOPNOTSUPP;
-}
-#endif                         /* WIRELESS_EXT < 18 */
-
 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
 {
@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
        dev->open = ipw2100_open;
        dev->stop = ipw2100_close;
        dev->init = ipw2100_net_init;
-#if WIRELESS_EXT < 18
-       dev->do_ioctl = ipw2100_ioctl;
-#endif
        dev->get_stats = ipw2100_stats;
        dev->ethtool_ops = &ipw2100_ethtool_ops;
        dev->tx_timeout = ipw2100_tx_timeout;
@@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
        return 0;
 }
 
-#if WIRELESS_EXT > 17
 /*
  * WE-18 WPA support
  */
@@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
        }
        return 0;
 }
-#endif                         /* WIRELESS_EXT > 17 */
 
 /*
  *
@@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
        NULL,                   /* SIOCWIWTHRSPY */
        ipw2100_wx_set_wap,     /* SIOCSIWAP */
        ipw2100_wx_get_wap,     /* SIOCGIWAP */
-#if WIRELESS_EXT > 17
        ipw2100_wx_set_mlme,    /* SIOCSIWMLME */
-#else
-       NULL,                   /* -- hole -- */
-#endif
        NULL,                   /* SIOCGIWAPLIST -- deprecated */
        ipw2100_wx_set_scan,    /* SIOCSIWSCAN */
        ipw2100_wx_get_scan,    /* SIOCGIWSCAN */
@@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
        ipw2100_wx_get_encode,  /* SIOCGIWENCODE */
        ipw2100_wx_set_power,   /* SIOCSIWPOWER */
        ipw2100_wx_get_power,   /* SIOCGIWPOWER */
-#if WIRELESS_EXT > 17
        NULL,                   /* -- hole -- */
        NULL,                   /* -- hole -- */
        ipw2100_wx_set_genie,   /* SIOCSIWGENIE */
@@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
        ipw2100_wx_set_encodeext,       /* SIOCSIWENCODEEXT */
        ipw2100_wx_get_encodeext,       /* SIOCGIWENCODEEXT */
        NULL,                   /* SIOCSIWPMKSA */
-#endif
 };
 
 #define IPW2100_PRIV_SET_MONITOR       SIOCIWFIRSTPRIV
index 819be2b6b7df03e87036217cf26d573cef4f3b43..4c28e332ecc33c656e0b9d55372db514cd9d7798 100644 (file)
@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
        IPW_DEBUG_HC("starting request direct scan!\n");
 
        if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
-               err = wait_event_interruptible(priv->wait_state,
-                                              !(priv->
-                                                status & (STATUS_SCANNING |
-                                                          STATUS_SCAN_ABORTING)));
-               if (err) {
-                       IPW_DEBUG_HC("aborting direct scan");
-                       goto done;
-               }
+               /* We should not sleep here; otherwise we will block most
+                * of the system (for instance, we hold rtnl_lock when we
+                * get here).
+                */
+               err = -EAGAIN;
+               goto done;
        }
        memset(&scan, 0, sizeof(scan));
 
index 135a156db25d9c07607d7ce0cae5cb9019636417..c5cd61c7f92774043c3184b2b5b23983748adda0 100644 (file)
@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
        if (essid->length) {
                dwrq->flags = 1;        /* set ESSID to ON for Wireless Extensions */
                /* if it is too big, truncate it */
-               dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1);
+               dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
        } else {
                dwrq->flags = 0;
                dwrq->length = 0;
index 33d64d2ee53f7397fb3b9db833a6e6a89611f1ee..a8261d8454dd54edff637a164439fc213a7e822a 100644 (file)
@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 #endif
 
                        newskb->dev = skb->dev;
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_irq(skb);
                        skb = newskb;
                }
        }
index 319180ca7e71325cc30350c85cd6bae57391e8da..7880d8c31aadc20abb96e554af606d4a337663f1 100644 (file)
@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
        extra[IW_ESSID_MAX_SIZE] = '\0';
 
        /* Push it out ! */
-       dwrq->length = strlen(extra) + 1;
+       dwrq->length = strlen(extra);
        dwrq->flags = 1; /* active */
 
        return 0;
index 7e2039f52c49fdcef509b7529626d38282522fb9..cf373625fc7074e3bdfb2f2abf214856c4142c81 100644 (file)
@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
        extra[IW_ESSID_MAX_SIZE] = '\0';
 
        /* Set the length */
-       wrqu->data.length = strlen(extra) + 1;
+       wrqu->data.length = strlen(extra);
 
        return 0;
 }
index 55e6e2d60d3a8de3a715e450ffee203b1d6af6d5..a4d7cc51ce0be8733f96d0bcc9f615c69ef7a027 100644 (file)
@@ -199,8 +199,7 @@ struct  fbcmap32 {
 #define FBIOPUTCMAP32  _IOW('F', 3, struct fbcmap32)
 #define FBIOGETCMAP32  _IOW('F', 4, struct fbcmap32)
 
-static int fbiogetputcmap(struct file *file, struct fb_info *info,
-               unsigned int cmd, unsigned long arg)
+static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
        struct fbcmap32 __user *argp = (void __user *)arg;
        struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
@@ -236,8 +235,7 @@ struct fbcursor32 {
 #define FBIOSCURSOR32  _IOW('F', 24, struct fbcursor32)
 #define FBIOGCURSOR32  _IOW('F', 25, struct fbcursor32)
 
-static int fbiogscursor(struct file *file, struct fb_info *info,
-               unsigned long arg)
+static int fbiogscursor(struct fb_info *info, unsigned long arg)
 {
        struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
        struct fbcursor32 __user *argp =  (void __user *)arg;
@@ -263,8 +261,7 @@ static int fbiogscursor(struct file *file, struct fb_info *info,
        return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
 }
 
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
-               unsigned long arg)
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
        switch (cmd) {
        case FBIOGTYPE:
index f753939013ed807b763631b25ea8a790447d0ab1..492828c3fe8fcf4c8a3d9992bea57ef51a44a89f 100644 (file)
@@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
                        struct fb_info *info,
                        int type, int fb_depth, unsigned long fb_size);
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
                unsigned long arg);
 
 #endif /* _SBUSLIB_H */
index cd9f11f1ef14b7ac8f44d2868f5a35b6c7548eca..4dc514aabfe7e8170c3820b587e260b3d6dd5131 100644 (file)
@@ -31,7 +31,7 @@
 
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
-struct __attribute__((__aligned__(0x400))) lppaca {
+struct lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,7 +129,7 @@ struct __attribute__((__aligned__(0x400))) lppaca {
 // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data
 //=============================================================================
        u8      pmc_save_area[256];     // PMC interrupt Area           x00-xFF
-};
+} __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];
 
index 323924edb26a5f686361bfa6cb188f9b5b4d9641..a5363324cf959bcdf8ff21188aee90040e29622a 100644 (file)
@@ -228,6 +228,7 @@ extern void dump_stack(void);
        ntohs((addr).s6_addr16[6]), \
        ntohs((addr).s6_addr16[7])
 #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
+#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
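Judging by the name, the colon-less variant is meant for seq_file output where the address should print as one fixed-width hex string; a hypothetical caller (addr being a struct in6_addr) would look like:

	seq_printf(seq, NIP6_SEQFMT "\n", NIP6(addr));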
 
 #if defined(__LITTLE_ENDIAN)
 #define HIPQUAD(addr) \
index c4f0793a0a98b5b1ae40154dcb51a5a8ad65da37..8531879eb4645424712e8c8893ea30102b2bf2a1 100644 (file)
@@ -18,13 +18,4 @@ struct ip6t_ah
 #define IP6T_AH_INV_LEN                0x02    /* Invert the sense of length. */
 #define IP6T_AH_INV_MASK       0x03    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_AH_H*/
index 01142b98a231444a6f45072032b377bf6c1c7406..a91b6abc8079e37a56d5713af4a9fc5cedce8c38 100644 (file)
@@ -7,15 +7,6 @@ struct ip6t_esp
        u_int8_t  invflags;                     /* Inverse flags */
 };
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 /* Values for "invflags" field in struct ip6t_esp. */
 #define IP6T_ESP_INV_SPI               0x01    /* Invert the sense of spi. */
 #define IP6T_ESP_INV_MASK      0x01    /* All possible flags. */
index 449a57eca7ddedb51af8d166a37ac83b26bbb867..66070a0d6dfce1a9ccf78b3dfc1eda9ee92a8b7a 100644 (file)
@@ -21,13 +21,4 @@ struct ip6t_frag
 #define IP6T_FRAG_INV_LEN      0x02    /* Invert the sense of length. */
 #define IP6T_FRAG_INV_MASK     0x03    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_FRAG_H*/
index e259b6275bd291df878a44fb7913007ea2c55c20..a07e36380ae8268492d2bca37f92edfe5a1ad488 100644 (file)
@@ -20,13 +20,4 @@ struct ip6t_opts
 #define IP6T_OPTS_INV_LEN      0x01    /* Invert the sense of length. */
 #define IP6T_OPTS_INV_MASK     0x01    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_OPTS_H*/
index f1070fbf2757070e41d60bf0639b1684e07201b2..52156023e8dbe7915e090ab05c8d826a4ae25022 100644 (file)
@@ -30,13 +30,4 @@ struct ip6t_rt
 #define IP6T_RT_INV_LEN                0x04    /* Invert the sense of length. */
 #define IP6T_RT_INV_MASK       0x07    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_RT_H*/
index e5fd66c5650b53fd1354aab69591ede2d6ea8794..ad7cc22bd424a8bb21c3055efd55e0a79861a5a9 100644 (file)
@@ -926,7 +926,7 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  *     Increase the headroom of an empty &sk_buff by reducing the tail
  *     room. This is only allowed for an empty buffer.
  */
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+static inline void skb_reserve(struct sk_buff *skb, int len)
 {
        skb->data += len;
        skb->tail += len;
index 03b766afdc395cab12fe65f083714667627b4b78..cd82c3e998e42860698716feadf6bad275e3ac76 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <linux/types.h>
 #include <linux/list.h>
+#include <net/ieee80211.h>
 #include <asm/atomic.h>
 
 enum {
index d67c8393a343711ad367108e595a48136995c9f8..a2c5e0b88422af2e7e92a98e0b925a0d20d28869 100644 (file)
@@ -327,7 +327,7 @@ struct iw_handler_def
        __u16                   num_private_args;
 
        /* Array of handlers for standard ioctls
-        * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME]
+        * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
         */
        const iw_handler *      standard;
 
index f158fe67dd605fd8ca3dd5b941df04147d0d277f..dc5d0b2427cf4c359aa0eb961b9108808db8cfbc 100644 (file)
@@ -92,7 +92,9 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
                if (info->invflags & EBT_IP_PROTO)
                        return -EINVAL;
                if (info->protocol != IPPROTO_TCP &&
-                   info->protocol != IPPROTO_UDP)
+                   info->protocol != IPPROTO_UDP &&
+                   info->protocol != IPPROTO_SCTP &&
+                   info->protocol != IPPROTO_DCCP)
                         return -EINVAL;
        }
        if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
index a29c1232c4204e5a66c45e251039c14575930c97..0128fbbe23281241d2929ab9ffd6eb74d71d67d9 100644 (file)
@@ -95,7 +95,9 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
                       "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
                       NIPQUAD(ih->daddr), ih->tos, ih->protocol);
                if (ih->protocol == IPPROTO_TCP ||
-                   ih->protocol == IPPROTO_UDP) {
+                   ih->protocol == IPPROTO_UDP ||
+                   ih->protocol == IPPROTO_SCTP ||
+                   ih->protocol == IPPROTO_DCCP) {
                        struct tcpudphdr _ports, *pptr;
 
                        pptr = skb_header_pointer(skb, ih->ihl*4,
index a52665f752240a6e48300d89403d1706675c6ee5..9540946a48f35f9cbb89a0d82eacddcd2c4de3a3 100644 (file)
@@ -74,7 +74,6 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
  * filtering, filter is the array of filter instructions, and
  * len is the number of filter blocks in the array.
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
        struct sock_filter *fentry;     /* We walk down these */
@@ -175,7 +174,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                        continue;
                case BPF_LD|BPF_W|BPF_ABS:
                        k = fentry->k;
- load_w:
+load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                                A = ntohl(*(u32 *)ptr);
@@ -184,7 +183,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                        break;
                case BPF_LD|BPF_H|BPF_ABS:
                        k = fentry->k;
- load_h:
+load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                                A = ntohs(*(u16 *)ptr);
@@ -374,7 +373,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                case BPF_JMP|BPF_JSET|BPF_K:
                case BPF_JMP|BPF_JSET|BPF_X:
                        /* for conditionals both must be safe */
-                       if (pc + ftest->jt + 1 >= flen ||
+                       if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
@@ -384,7 +383,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                }
        }
 
-        return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
+       return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
 }
 
 /**
@@ -404,8 +403,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        int err;
 
        /* Make sure new filter is there and in the right amounts. */
-        if (fprog->filter == NULL)
-                return -EINVAL;
+       if (fprog->filter == NULL)
+               return -EINVAL;
 
        fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
        if (!fp)
index 281a632fa6a6eaf37085797076177f394b55327a..ea51f8d02eb8654d0533afd7bc5eb38a0e08c7fd 100644 (file)
@@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np)
                }
        }
 
-       if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr)
+       if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);
 
        if (!np->local_ip) {
index 39063122fbb7383ab1b8cf2f569fac285ca4e15f..3827f881f4292312b9eba8eb30f81b915d96b4e9 100644 (file)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/wait.h>
+#include <linux/etherdevice.h>
 #include <net/checksum.h>
 #include <net/ipv6.h>
 #include <net/addrconf.h>
@@ -281,8 +282,8 @@ struct pktgen_dev {
         __u32 src_mac_count; /* How many MACs to iterate through */
         __u32 dst_mac_count; /* How many MACs to iterate through */
         
-        unsigned char dst_mac[6];
-        unsigned char src_mac[6];
+        unsigned char dst_mac[ETH_ALEN];
+        unsigned char src_mac[ETH_ALEN];
         
         __u32 cur_dst_mac_offset;
         __u32 cur_src_mac_offset;
@@ -594,16 +595,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
        seq_puts(seq, "     src_mac: ");
 
-       if ((pkt_dev->src_mac[0] == 0) && 
-           (pkt_dev->src_mac[1] == 0) && 
-           (pkt_dev->src_mac[2] == 0) && 
-           (pkt_dev->src_mac[3] == 0) && 
-           (pkt_dev->src_mac[4] == 0) && 
-           (pkt_dev->src_mac[5] == 0)) 
-
+       if (is_zero_ether_addr(pkt_dev->src_mac))
                for (i = 0; i < 6; i++) 
                        seq_printf(seq,  "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? "  " : ":");
-
        else 
                for (i = 0; i < 6; i++) 
                        seq_printf(seq,  "%02X%s", pkt_dev->src_mac[i], i == 5 ? "  " : ":");
@@ -1189,9 +1183,9 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
        }
        if (!strcmp(name, "dst_mac")) {
                char *v = valstr;
-                unsigned char old_dmac[6];
+               unsigned char old_dmac[ETH_ALEN];
                unsigned char *m = pkt_dev->dst_mac;
-                memcpy(old_dmac, pkt_dev->dst_mac, 6);
+               memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
                 
                len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
                 if (len < 0) { return len; }
@@ -1220,8 +1214,8 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
                }
 
                /* Set up Dest MAC */
-                if (memcmp(old_dmac, pkt_dev->dst_mac, 6) != 0) 
-                        memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6);
+               if (compare_ether_addr(old_dmac, pkt_dev->dst_mac))
+                       memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
                 
                sprintf(pg_result, "OK: dstmac");
                return count;
@@ -1560,17 +1554,11 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
         
         /* Default to the interface's mac if not explicitly set. */
 
-       if ((pkt_dev->src_mac[0] == 0) && 
-           (pkt_dev->src_mac[1] == 0) && 
-           (pkt_dev->src_mac[2] == 0) && 
-           (pkt_dev->src_mac[3] == 0) && 
-           (pkt_dev->src_mac[4] == 0) && 
-           (pkt_dev->src_mac[5] == 0)) {
+       if (is_zero_ether_addr(pkt_dev->src_mac))
+              memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
 
-              memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, 6);
-       }
         /* Set up Dest MAC */
-        memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6);
+       memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
 
         /* Set up pkt size */
         pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
index ce9cb77c5c29c272b7f6462c0b0a835cd1383454..2c77dafbd091f9c4627097b037b0a816bf18301e 100644 (file)
@@ -144,7 +144,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
                                                 const unsigned char state)
 {
        unsigned int gap;
-       signed long new_head;
+       long new_head;
 
        if (av->dccpav_vec_len + packets > av->dccpav_buf_len)
                return -ENOBUFS;
index bcefe64b93177c7e4705065c6f909f46eb7a06e2..e5c5b3202f024a89c9162df7e8ef19308fc17778 100644 (file)
@@ -46,7 +46,6 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 
 # matches
-obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
 obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
 obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
index c777abf16cb7a99573a2a9d841b96c9fe8f6f4e5..56794797d55b9eedce61b7542ceae00a627d68ae 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/in.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/interrupt.h>
 
 static DEFINE_RWLOCK(ip_ct_gre_lock);
 #define ASSERT_READ_LOCK(x)
index 709debcc69c92d2ee1403e8ad86cb61a2e946328..18ca8258a1c597c170fd718f5c8556a1eaefb382 100644 (file)
@@ -95,7 +95,10 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
 static int match(const struct sk_buff *skb,
                  const struct net_device *in,
                  const struct net_device *out,
-                 const void *matchinfo, int offset, int *hotdrop)
+                 const void *matchinfo,
+                 int offset,
+                 unsigned int protoff,
+                 int *hotdrop)
 {
        const struct ipt_policy_info *info = matchinfo;
        int ret;
@@ -113,7 +116,7 @@ static int match(const struct sk_buff *skb,
        return ret;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                       void *matchinfo, unsigned int matchsize,
                       unsigned int hook_mask)
 {
index f701a136a6ae010c48fae0204b7e028aafb90c8f..f2e82afc15b301653e789d6a90f42fa53b034f26 100644 (file)
@@ -240,9 +240,8 @@ static unsigned                     rt_hash_mask;
 static int                     rt_hash_log;
 static unsigned int            rt_hash_rnd;
 
-static struct rt_cache_stat *rt_cache_stat;
-#define RT_CACHE_STAT_INC(field)                                         \
-               (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
+static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
+#define RT_CACHE_STAT_INC(field) (__get_cpu_var(rt_cache_stat).field++)
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
                                struct rtable **res);
@@ -401,7 +400,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
-               return per_cpu_ptr(rt_cache_stat, cpu);
+               return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
 }
@@ -414,7 +413,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
-               return per_cpu_ptr(rt_cache_stat, cpu);
+               return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
        
@@ -3160,10 +3159,6 @@ int __init ip_rt_init(void)
        ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
        ip_rt_max_size = (rt_hash_mask + 1) * 16;
 
-       rt_cache_stat = alloc_percpu(struct rt_cache_stat);
-       if (!rt_cache_stat)
-               return -ENOMEM;
-
        devinet_init();
        ip_fib_init();
 
@@ -3191,7 +3186,6 @@ int __init ip_rt_init(void)
        if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
            !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 
                                             proc_net_stat))) {
-               free_percpu(rt_cache_stat);
                return -ENOMEM;
        }
        rtstat_pde->proc_fops = &rt_cpu_seq_fops;
index dfb4f145a139af35f9ef2ebb61723d5ae3edd5d1..d328d59861438ae0b8bad815a3ef34cacf7de328 100644 (file)
@@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
 {
        struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
        seq_printf(seq,
-                  NIP6_FMT " %02x %02x %02x %02x %8s\n",
+                  NIP6_SEQFMT " %02x %02x %02x %02x %8s\n",
                   NIP6(ifp->addr),
                   ifp->idev->dev->ifindex,
                   ifp->prefix_len,
index 72bd08af2dfb0ac1c1f1d3fe0fe94cf7cdf9363d..840a33d3329696bfc5f9c2a67ca6e86b6336dca0 100644 (file)
@@ -532,7 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
        struct ac6_iter_state *state = ac6_seq_private(seq);
 
        seq_printf(seq,
-                  "%-4d %-15s " NIP6_FMT " %5d\n",
+                  "%-4d %-15s " NIP6_SEQFMT " %5d\n",
                   state->dev->ifindex, state->dev->name,
                   NIP6(im->aca_addr),
                   im->aca_users);
index 4183c8dac7f6e16c448bd7e74f1f67ab649507fc..69cbe8a66d02ce7f97cc4163b216b45251972434 100644 (file)
@@ -629,7 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 {
        while(fl) {
                seq_printf(seq,
-                          "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n",
+                          "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n",
                           (unsigned)ntohl(fl->label),
                           fl->share,
                           (unsigned)fl->owner,
@@ -645,7 +645,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n",
+               seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
                           "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
        else
                ip6fl_fl_seq_show(seq, v);
index 0e03eabfb9da3fea0541736e1dc742fb9052de36..6c05c7978bef2cf469111dfd891c5052a587dfac 100644 (file)
@@ -2373,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
 
        seq_printf(seq,
-                  "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", 
+                  "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n", 
                   state->dev->ifindex, state->dev->name,
                   NIP6(im->mca_addr),
                   im->mca_users, im->mca_flags,
@@ -2542,12 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, 
                           "%3s %6s "
-                          "%39s %39s %6s %6s\n", "Idx",
+                          "%32s %32s %6s %6s\n", "Idx",
                           "Device", "Multicast Address",
                           "Source Address", "INC", "EXC");
        } else {
                seq_printf(seq,
-                          "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n",
+                          "%3d %6.6s " NIP6_SEQFMT " " NIP6_SEQFMT " %6lu %6lu\n",
                           state->dev->ifindex, state->dev->name,
                           NIP6(state->im->mca_addr),
                           NIP6(psf->sf_addr),
index 663b4749820d7d5e9ffcb955970457e7e7098470..db6073c941633f3ccd602f646f286defb2074253 100644 (file)
@@ -4,7 +4,6 @@
 
 # Link order matters here.
 obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
-obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
index 80fe82669ce2616af5f99e866140dbf50d65a316..b4c153a53500f242b3fae2d67f67ef3ec22dbe53 100644 (file)
@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 #endif
 
 /*
- * (Type & 0xC0) >> 6
- *     0       -> ignorable
- *     1       -> must drop the packet
- *     2       -> send ICMP PARM PROB regardless and drop packet
- *     3       -> Send ICMP if not a multicast address and drop packet
+ *  (Type & 0xC0) >> 6
+ *     0       -> ignorable
+ *     1       -> must drop the packet
+ *     2       -> send ICMP PARM PROB regardless and drop packet
+ *     3       -> Send ICMP if not a multicast address and drop packet
  *  (Type & 0x20) >> 5
- *     0       -> invariant
- *     1       -> can change the routing
+ *     0       -> invariant
+ *     1       -> can change the routing
  *  (Type & 0x1F) Type
- *      0      -> Pad1 (only 1 byte!)
- *      1      -> PadN LENGTH info (total length = length + 2)
- *      C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k )
- *      5      -> RTALERT 2 x x
+ *     0       -> Pad1 (only 1 byte!)
+ *     1       -> PadN LENGTH info (total length = length + 2)
+ *     C0 | 2  -> JUMBO 4 x x x x ( xxxx > 64k )
+ *     5       -> RTALERT 2 x x
  */
 
 static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_opt_hdr _optsh, *oh;
-       const struct ip6t_opts *optinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       u8 _opttype, *tp = NULL;
-       u8 _optlen, *lp = NULL;
-       unsigned int optlen;
-       
+       struct ipv6_opt_hdr _optsh, *oh;
+       const struct ip6t_opts *optinfo = matchinfo;
+       unsigned int temp;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int ret = 0;
+       u8 _opttype, *tp = NULL;
+       u8 _optlen, *lp = NULL;
+       unsigned int optlen;
+
 #if HOPBYHOP
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
 #else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
 #endif
                return 0;
 
-       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
-       if (oh == NULL){
-              *hotdrop = 1;
-                       return 0;
-       }
-
-       hdrlen = ipv6_optlen(oh);
-       if (skb->len - ptr < hdrlen){
-              /* Packet smaller than it's length field */
-                       return 0;
-       }
-
-       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
-
-       DEBUGP("len %02X %04X %02X ",
-                       optinfo->hdrlen, hdrlen,
-                       (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
-
-       ret = (oh != NULL)
-                       &&
-               (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
-
-       ptr += 2;
-       hdrlen -= 2;
-       if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){
-              return ret;
+       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+       if (oh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       hdrlen = ipv6_optlen(oh);
+       if (skb->len - ptr < hdrlen) {
+               /* Packet smaller than its length field */
+               return 0;
+       }
+
+       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
+
+       DEBUGP("len %02X %04X %02X ",
+              optinfo->hdrlen, hdrlen,
+              (!(optinfo->flags & IP6T_OPTS_LEN) ||
+               ((optinfo->hdrlen == hdrlen) ^
+                !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
+
+       ret = (oh != NULL) &&
+             (!(optinfo->flags & IP6T_OPTS_LEN) ||
+              ((optinfo->hdrlen == hdrlen) ^
+               !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
+
+       ptr += 2;
+       hdrlen -= 2;
+       if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
+               return ret;
        } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
                DEBUGP("Not strict - not implemented");
        } else {
                DEBUGP("Strict ");
-               DEBUGP("#%d ",optinfo->optsnr);
-               for(temp=0; temp<optinfo->optsnr; temp++){
+               DEBUGP("#%d ", optinfo->optsnr);
+               for (temp = 0; temp < optinfo->optsnr; temp++) {
                        /* type field exists ? */
                        if (hdrlen < 1)
                                break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
                                break;
 
                        /* Type check */
-                       if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){
+                       if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
                                DEBUGP("Tbad %02X %02X\n",
                                       *tp,
-                                      (optinfo->opts[temp] & 0xFF00)>>8);
+                                      (optinfo->opts[temp] & 0xFF00) >> 8);
                                return 0;
                        } else {
                                DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
                }
                if (temp == optinfo->optsnr)
                        return ret;
-               else return 0;
+               else
+                       return 0;
        }
 
        return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *info,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *info,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_opts *optsinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
-              DEBUGP("ip6t_opts: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
-              return 0;
-       }
-       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
-              DEBUGP("ip6t_opts: unknown flags %X\n",
-                      optsinfo->invflags);
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_opts *optsinfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
+               DEBUGP("ip6t_opts: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
+               return 0;
+       }
+       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
+               DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&opts_match);
+       return ip6t_register_match(&opts_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&opts_match);
+       ip6t_unregister_match(&opts_match);
 }
 
 module_init(init);
index ddf5f571909c03b27b8038e11af598da276aea6e..27396ac0b9edb0a229c270ca92a648c325ae61fa 100644 (file)
@@ -27,45 +27,45 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
+       unsigned char eui64[8];
+       int i = 0;
 
-    unsigned char eui64[8];
-    int i=0;
-
-     if ( !(skb->mac.raw >= skb->head
-                && (skb->mac.raw + ETH_HLEN) <= skb->data)
-                && offset != 0) {
-                        *hotdrop = 1;
-                        return 0;
-                }
-    
-    memset(eui64, 0, sizeof(eui64));
-
-    if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
-      if (skb->nh.ipv6h->version == 0x6) { 
-         memcpy(eui64, eth_hdr(skb)->h_source, 3);
-         memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
-        eui64[3]=0xff;
-        eui64[4]=0xfe;
-        eui64[0] |= 0x02;
-
-        i=0;
-        while ((skb->nh.ipv6h->saddr.s6_addr[8+i] ==
-                        eui64[i]) && (i<8)) i++;
-
-        if ( i == 8 )
-               return 1;
-      }
-    }
-
-    return 0;
+       if (!(skb->mac.raw >= skb->head &&
+             (skb->mac.raw + ETH_HLEN) <= skb->data) &&
+           offset != 0) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       memset(eui64, 0, sizeof(eui64));
+
+       if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+               if (skb->nh.ipv6h->version == 0x6) {
+                       memcpy(eui64, eth_hdr(skb)->h_source, 3);
+                       memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
+                       eui64[3] = 0xff;
+                       eui64[4] = 0xfe;
+                       eui64[0] |= 0x02;
+
+                       i = 0;
+                       while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i])
+                              && (i < 8))
+                               i++;
+
+                       if (i == 8)
+                               return 1;
+               }
+       }
+
+       return 0;
 }
 
 static int
 ip6t_eui64_checkentry(const char *tablename,
-                  const void  *ip,
-                  void *matchinfo,
-                  unsigned int matchsize,
-                  unsigned int hook_mask)
+                     const void *ip,
+                     void *matchinfo,
+                     unsigned int matchsize,
+                     unsigned int hook_mask)
 {
        if (hook_mask
            & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
index a9964b946ed503409eeaa750d044ac82fa654bc3..4c14125a0e26d18ec775bcfcfe0b27661d98713e 100644 (file)
@@ -31,12 +31,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline int
 id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
 {
-       int r=0;
-       DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
-              min,id,max);
-       r=(id >= min && id <= max) ^ invert;
-       DEBUGP(" result %s\n",r? "PASS" : "FAILED");
-       return r;
+       int r = 0;
+       DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
+              min, id, max);
+       r = (id >= min && id <= max) ^ invert;
+       DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
+       return r;
 }
 
 static int
@@ -48,92 +48,91 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct frag_hdr _frag, *fh;
-       const struct ip6t_frag *fraginfo = matchinfo;
-       unsigned int ptr;
+       struct frag_hdr _frag, *fh;
+       const struct ip6t_frag *fraginfo = matchinfo;
+       unsigned int ptr;
 
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0)
                return 0;
 
        fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
-       if (fh == NULL){
+       if (fh == NULL) {
                *hotdrop = 1;
                return 0;
        }
 
-       DEBUGP("INFO %04X ", fh->frag_off);
-       DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
-       DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
-       DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
-       DEBUGP("ID %u %08X\n", ntohl(fh->identification),
-             ntohl(fh->identification));
-
-       DEBUGP("IPv6 FRAG id %02X ",
-                       (id_match(fraginfo->ids[0], fraginfo->ids[1],
-                           ntohl(fh->identification),
-                           !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
-       DEBUGP("res %02X %02X%04X %02X ", 
-                       (fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
-               ntohs(fh->frag_off) & 0x6,
-                       !((fraginfo->flags & IP6T_FRAG_RES)
-                       && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
-       DEBUGP("first %02X %02X %02X ", 
-                       (fraginfo->flags & IP6T_FRAG_FST),
-               ntohs(fh->frag_off) & ~0x7,
-                       !((fraginfo->flags & IP6T_FRAG_FST)
-                       && (ntohs(fh->frag_off) & ~0x7)));
-       DEBUGP("mf %02X %02X %02X ", 
-                       (fraginfo->flags & IP6T_FRAG_MF),
-               ntohs(fh->frag_off) & IP6_MF,
-                       !((fraginfo->flags & IP6T_FRAG_MF)
-                       && !((ntohs(fh->frag_off) & IP6_MF))));
-       DEBUGP("last %02X %02X %02X\n", 
-                       (fraginfo->flags & IP6T_FRAG_NMF),
-               ntohs(fh->frag_off) & IP6_MF,
-                       !((fraginfo->flags & IP6T_FRAG_NMF)
-                       && (ntohs(fh->frag_off) & IP6_MF)));
-
-       return (fh != NULL)
-                       &&
-                       (id_match(fraginfo->ids[0], fraginfo->ids[1],
-                         ntohl(fh->identification),
-                           !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_RES)
-                       && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_FST)
-                       && (ntohs(fh->frag_off) & ~0x7))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_MF)
-                       && !(ntohs(fh->frag_off) & IP6_MF))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_NMF)
-                       && (ntohs(fh->frag_off) & IP6_MF));
+       DEBUGP("INFO %04X ", fh->frag_off);
+       DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
+       DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
+       DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
+       DEBUGP("ID %u %08X\n", ntohl(fh->identification),
+              ntohl(fh->identification));
+
+       DEBUGP("IPv6 FRAG id %02X ",
+              (id_match(fraginfo->ids[0], fraginfo->ids[1],
+                        ntohl(fh->identification),
+                        !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
+       DEBUGP("res %02X %02X%04X %02X ",
+              (fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
+              ntohs(fh->frag_off) & 0x6,
+              !((fraginfo->flags & IP6T_FRAG_RES)
+                && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
+       DEBUGP("first %02X %02X %02X ",
+              (fraginfo->flags & IP6T_FRAG_FST),
+              ntohs(fh->frag_off) & ~0x7,
+              !((fraginfo->flags & IP6T_FRAG_FST)
+                && (ntohs(fh->frag_off) & ~0x7)));
+       DEBUGP("mf %02X %02X %02X ",
+              (fraginfo->flags & IP6T_FRAG_MF),
+              ntohs(fh->frag_off) & IP6_MF,
+              !((fraginfo->flags & IP6T_FRAG_MF)
+                && !((ntohs(fh->frag_off) & IP6_MF))));
+       DEBUGP("last %02X %02X %02X\n",
+              (fraginfo->flags & IP6T_FRAG_NMF),
+              ntohs(fh->frag_off) & IP6_MF,
+              !((fraginfo->flags & IP6T_FRAG_NMF)
+                && (ntohs(fh->frag_off) & IP6_MF)));
+
+       return (fh != NULL)
+              &&
+              (id_match(fraginfo->ids[0], fraginfo->ids[1],
+                        ntohl(fh->identification),
+                        !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_RES)
+                && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_FST)
+                && (ntohs(fh->frag_off) & ~0x7))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_MF)
+                && !(ntohs(fh->frag_off) & IP6_MF))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_NMF)
+                && (ntohs(fh->frag_off) & IP6_MF));
 }
 
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *ip,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *ip,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_frag *fraginfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
-              DEBUGP("ip6t_frag: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
-              return 0;
-       }
-       if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
-              DEBUGP("ip6t_frag: unknown flags %X\n",
-                      fraginfo->invflags);
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_frag *fraginfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
+               DEBUGP("ip6t_frag: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
+               return 0;
+       }
+       if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
+               DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match frag_match = {
@@ -145,12 +144,12 @@ static struct ip6t_match frag_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&frag_match);
+       return ip6t_register_match(&frag_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&frag_match);
+       ip6t_unregister_match(&frag_match);
 }
 
 module_init(init);
index ed8ded18bbd4f5970fadb49f180ef9bd275ea327..37a8474a7e0c911e1508aafc5a8e4f6f4d86c4a1 100644 (file)
@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 #endif
 
 /*
- * (Type & 0xC0) >> 6
- *     0       -> ignorable
- *     1       -> must drop the packet
- *     2       -> send ICMP PARM PROB regardless and drop packet
- *     3       -> Send ICMP if not a multicast address and drop packet
+ *  (Type & 0xC0) >> 6
+ *     0       -> ignorable
+ *     1       -> must drop the packet
+ *     2       -> send ICMP PARM PROB regardless and drop packet
+ *     3       -> Send ICMP if not a multicast address and drop packet
  *  (Type & 0x20) >> 5
- *     0       -> invariant
- *     1       -> can change the routing
+ *     0       -> invariant
+ *     1       -> can change the routing
  *  (Type & 0x1F) Type
- *      0      -> Pad1 (only 1 byte!)
- *      1      -> PadN LENGTH info (total length = length + 2)
- *      C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k )
- *      5      -> RTALERT 2 x x
+ *     0       -> Pad1 (only 1 byte!)
+ *     1       -> PadN LENGTH info (total length = length + 2)
+ *     C0 | 2  -> JUMBO 4 x x x x ( xxxx > 64k )
+ *     5       -> RTALERT 2 x x
  */
 
 static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_opt_hdr _optsh, *oh;
-       const struct ip6t_opts *optinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       u8 _opttype, *tp = NULL;
-       u8 _optlen, *lp = NULL;
-       unsigned int optlen;
-       
+       struct ipv6_opt_hdr _optsh, *oh;
+       const struct ip6t_opts *optinfo = matchinfo;
+       unsigned int temp;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int ret = 0;
+       u8 _opttype, *tp = NULL;
+       u8 _optlen, *lp = NULL;
+       unsigned int optlen;
+
 #if HOPBYHOP
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
 #else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
 #endif
                return 0;
 
-       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
-       if (oh == NULL){
-              *hotdrop = 1;
-                       return 0;
-       }
-
-       hdrlen = ipv6_optlen(oh);
-       if (skb->len - ptr < hdrlen){
-              /* Packet smaller than it's length field */
-                       return 0;
-       }
-
-       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
-
-       DEBUGP("len %02X %04X %02X ",
-                       optinfo->hdrlen, hdrlen,
-                       (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
-
-       ret = (oh != NULL)
-                       &&
-               (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
-
-       ptr += 2;
-       hdrlen -= 2;
-       if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){
-              return ret;
+       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+       if (oh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       hdrlen = ipv6_optlen(oh);
+       if (skb->len - ptr < hdrlen) {
+               /* Packet smaller than its length field */
+               return 0;
+       }
+
+       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
+
+       DEBUGP("len %02X %04X %02X ",
+              optinfo->hdrlen, hdrlen,
+              (!(optinfo->flags & IP6T_OPTS_LEN) ||
+               ((optinfo->hdrlen == hdrlen) ^
+                !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
+
+       ret = (oh != NULL) &&
+             (!(optinfo->flags & IP6T_OPTS_LEN) ||
+              ((optinfo->hdrlen == hdrlen) ^
+               !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
+
+       ptr += 2;
+       hdrlen -= 2;
+       if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
+               return ret;
        } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
                DEBUGP("Not strict - not implemented");
        } else {
                DEBUGP("Strict ");
-               DEBUGP("#%d ",optinfo->optsnr);
-               for(temp=0; temp<optinfo->optsnr; temp++){
+               DEBUGP("#%d ", optinfo->optsnr);
+               for (temp = 0; temp < optinfo->optsnr; temp++) {
                        /* type field exists ? */
                        if (hdrlen < 1)
                                break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
                                break;
 
                        /* Type check */
-                       if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){
+                       if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
                                DEBUGP("Tbad %02X %02X\n",
                                       *tp,
-                                      (optinfo->opts[temp] & 0xFF00)>>8);
+                                      (optinfo->opts[temp] & 0xFF00) >> 8);
                                return 0;
                        } else {
                                DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
                }
                if (temp == optinfo->optsnr)
                        return ret;
-               else return 0;
+               else
+                       return 0;
        }
 
        return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *entry,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *entry,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_opts *optsinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
-              DEBUGP("ip6t_opts: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
-              return 0;
-       }
-       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
-              DEBUGP("ip6t_opts: unknown flags %X\n",
-                      optsinfo->invflags);
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_opts *optsinfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
+               DEBUGP("ip6t_opts: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
+               return 0;
+       }
+       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
+               DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&opts_match);
+       return ip6t_register_match(&opts_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&opts_match);
+       ip6t_unregister_match(&opts_match);
 }
 
 module_init(init);
index fda1ceaf5a2976c579ef22e7899791561a465bff..83ad6b272f7e6fbf201f09fc1b74a19a71abd0b2 100644 (file)
@@ -50,20 +50,20 @@ ipv6header_match(const struct sk_buff *skb,
        len = skb->len - ptr;
        temp = 0;
 
-        while (ip6t_ext_hdr(nexthdr)) {
+       while (ip6t_ext_hdr(nexthdr)) {
                struct ipv6_opt_hdr _hdr, *hp;
-               int hdrlen;
+               int hdrlen;
 
                /* Is there enough space for the next ext header? */
-                if (len < (int)sizeof(struct ipv6_opt_hdr))
-                        return 0;
+               if (len < (int)sizeof(struct ipv6_opt_hdr))
+                       return 0;
                /* No more exthdr -> evaluate */
-                if (nexthdr == NEXTHDR_NONE) {
+               if (nexthdr == NEXTHDR_NONE) {
                        temp |= MASK_NONE;
                        break;
                }
                /* ESP -> evaluate */
-                if (nexthdr == NEXTHDR_ESP) {
+               if (nexthdr == NEXTHDR_ESP) {
                        temp |= MASK_ESP;
                        break;
                }
@@ -72,43 +72,43 @@ ipv6header_match(const struct sk_buff *skb,
                BUG_ON(hp == NULL);
 
                /* Calculate the header length */
-                if (nexthdr == NEXTHDR_FRAGMENT) {
-                        hdrlen = 8;
-                } else if (nexthdr == NEXTHDR_AUTH)
-                        hdrlen = (hp->hdrlen+2)<<2;
-                else
-                        hdrlen = ipv6_optlen(hp);
+               if (nexthdr == NEXTHDR_FRAGMENT) {
+                       hdrlen = 8;
+               } else if (nexthdr == NEXTHDR_AUTH)
+                       hdrlen = (hp->hdrlen + 2) << 2;
+               else
+                       hdrlen = ipv6_optlen(hp);
 
                /* set the flag */
-               switch (nexthdr){
-                       case NEXTHDR_HOP:
-                               temp |= MASK_HOPOPTS;
-                               break;
-                       case NEXTHDR_ROUTING:
-                               temp |= MASK_ROUTING;
-                               break;
-                       case NEXTHDR_FRAGMENT:
-                               temp |= MASK_FRAGMENT;
-                               break;
-                       case NEXTHDR_AUTH:
-                               temp |= MASK_AH;
-                               break;
-                       case NEXTHDR_DEST:
-                               temp |= MASK_DSTOPTS;
-                               break;
-                       default:
-                               return 0;
-                               break;
+               switch (nexthdr) {
+               case NEXTHDR_HOP:
+                       temp |= MASK_HOPOPTS;
+                       break;
+               case NEXTHDR_ROUTING:
+                       temp |= MASK_ROUTING;
+                       break;
+               case NEXTHDR_FRAGMENT:
+                       temp |= MASK_FRAGMENT;
+                       break;
+               case NEXTHDR_AUTH:
+                       temp |= MASK_AH;
+                       break;
+               case NEXTHDR_DEST:
+                       temp |= MASK_DSTOPTS;
+                       break;
+               default:
+                       return 0;
+                       break;
                }
 
-                nexthdr = hp->nexthdr;
-                len -= hdrlen;
-                ptr += hdrlen;
+               nexthdr = hp->nexthdr;
+               len -= hdrlen;
+               ptr += hdrlen;
                if (ptr > skb->len)
                        break;
-        }
+       }
 
-       if ( (nexthdr != NEXTHDR_NONE ) && (nexthdr != NEXTHDR_ESP) )
+       if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP))
                temp |= MASK_PROTO;
 
        if (info->modeflag)
@@ -137,8 +137,8 @@ ipv6header_checkentry(const char *tablename,
                return 0;
 
        /* invflags is 0 or 0xff in hard mode */
-       if ((!info->modeflag) && info->invflags != 0x00
-                             && info->invflags != 0xFF)
+       if ((!info->modeflag) && info->invflags != 0x00 &&
+           info->invflags != 0xFF)
                return 0;
 
        return 1;
@@ -152,7 +152,7 @@ static struct ip6t_match ip6t_ipv6header_match = {
        .me             = THIS_MODULE,
 };
 
-static int  __init ipv6header_init(void)
+static int __init ipv6header_init(void)
 {
        return ip6t_register_match(&ip6t_ipv6header_match);
 }
@@ -164,4 +164,3 @@ static void __exit ipv6header_exit(void)
 
 module_init(ipv6header_init);
 module_exit(ipv6header_exit);
-
index 5409b375b5121efbc3eea1ede04c7e80bafc471d..8c8a4c7ec9340cee4d778087d3e731b5649ce212 100644 (file)
@@ -36,14 +36,14 @@ match(const struct sk_buff *skb,
        if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
                return 0;
 
-       if(info->match & IP6T_OWNER_UID) {
-               if((skb->sk->sk_socket->file->f_uid != info->uid) ^
+       if (info->match & IP6T_OWNER_UID) {
+               if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
                    !!(info->invert & IP6T_OWNER_UID))
                        return 0;
        }
 
-       if(info->match & IP6T_OWNER_GID) {
-               if((skb->sk->sk_socket->file->f_gid != info->gid) ^
+       if (info->match & IP6T_OWNER_GID) {
+               if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
                    !!(info->invert & IP6T_OWNER_GID))
                        return 0;
        }
@@ -53,23 +53,23 @@ match(const struct sk_buff *skb,
 
 static int
 checkentry(const char *tablename,
-           const void  *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
+          const void *ip,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
 {
        const struct ip6t_owner_info *info = matchinfo;
 
-        if (hook_mask
-            & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
-                printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
-                return 0;
-        }
+       if (hook_mask
+           & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
+               printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
+               return 0;
+       }
 
        if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info)))
                return 0;
 
-       if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) {
+       if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) {
                printk("ipt_owner: pid and sid matching "
                       "not supported anymore\n");
                return 0;
index 13fedad48c1d76a549aa4c467d75b571e7002a76..afe1cc4c18a5bffc734d6011b1dfddd395007158 100644 (file)
@@ -118,7 +118,7 @@ static int match(const struct sk_buff *skb,
        return ret;
 }
 
-static int checkentry(const char *tablename, const struct ip6t_ip6 *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                       void *matchinfo, unsigned int matchsize,
                       unsigned int hook_mask)
 {
index 8465b4375855862f7f196ce260b0a64ef2d61343..8f82476dc89e6a46d6d644fb95abbcf568718ea1 100644 (file)
@@ -33,12 +33,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline int
 segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
 {
-       int r=0;
-       DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
-              min,id,max);
-       r=(id >= min && id <= max) ^ invert;
-       DEBUGP(" result %s\n",r? "PASS" : "FAILED");
-       return r;
+       int r = 0;
+       DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
+              invert ? '!' : ' ', min, id, max);
+       r = (id >= min && id <= max) ^ invert;
+       DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
+       return r;
 }
 
 static int
@@ -50,87 +50,93 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_rt_hdr _route, *rh;
-       const struct ip6t_rt *rtinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       struct in6_addr *ap, _addr;
+       struct ipv6_rt_hdr _route, *rh;
+       const struct ip6t_rt *rtinfo = matchinfo;
+       unsigned int temp;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int ret = 0;
+       struct in6_addr *ap, _addr;
 
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0)
                return 0;
 
-       rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
-       if (rh == NULL){
-              *hotdrop = 1;
-                       return 0;
-       }
-
-       hdrlen = ipv6_optlen(rh);
-       if (skb->len - ptr < hdrlen){
-              /* Pcket smaller than its length field */
-                       return 0;
-       }
-
-       DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
-       DEBUGP("TYPE %04X ", rh->type);
-       DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
-
-       DEBUGP("IPv6 RT segsleft %02X ",
-                       (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                           rh->segments_left,
-                           !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
-       DEBUGP("type %02X %02X %02X ",
-                       rtinfo->rt_type, rh->type, 
-                       (!(rtinfo->flags & IP6T_RT_TYP) ||
-                           ((rtinfo->rt_type == rh->type) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
-       DEBUGP("len %02X %04X %02X ",
-                       rtinfo->hdrlen, hdrlen,
-                       (!(rtinfo->flags & IP6T_RT_LEN) ||
-                           ((rtinfo->hdrlen == hdrlen) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
-       DEBUGP("res %02X %02X %02X ", 
-                       (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved,
-                       !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved)));
-
-       ret = (rh != NULL)
-                       &&
-                       (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                           rh->segments_left,
-                           !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
-               &&
-               (!(rtinfo->flags & IP6T_RT_LEN) ||
-                           ((rtinfo->hdrlen == hdrlen) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_LEN)))
-               &&
-                       (!(rtinfo->flags & IP6T_RT_TYP) ||
-                           ((rtinfo->rt_type == rh->type) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
+       rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
+       if (rh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       hdrlen = ipv6_optlen(rh);
+       if (skb->len - ptr < hdrlen) {
+               /* Packet smaller than its length field */
+               return 0;
+       }
+
+       DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
+       DEBUGP("TYPE %04X ", rh->type);
+       DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
+
+       DEBUGP("IPv6 RT segsleft %02X ",
+              (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
+                              rh->segments_left,
+                              !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
+       DEBUGP("type %02X %02X %02X ",
+              rtinfo->rt_type, rh->type,
+              (!(rtinfo->flags & IP6T_RT_TYP) ||
+               ((rtinfo->rt_type == rh->type) ^
+                !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
+       DEBUGP("len %02X %04X %02X ",
+              rtinfo->hdrlen, hdrlen,
+              (!(rtinfo->flags & IP6T_RT_LEN) ||
+               ((rtinfo->hdrlen == hdrlen) ^
+                !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
+       DEBUGP("res %02X %02X %02X ",
+              (rtinfo->flags & IP6T_RT_RES),
+              ((struct rt0_hdr *)rh)->reserved,
+              !((rtinfo->flags & IP6T_RT_RES) &&
+                (((struct rt0_hdr *)rh)->reserved)));
+
+       ret = (rh != NULL)
+             &&
+             (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
+                             rh->segments_left,
+                             !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
+             &&
+             (!(rtinfo->flags & IP6T_RT_LEN) ||
+              ((rtinfo->hdrlen == hdrlen) ^
+               !!(rtinfo->invflags & IP6T_RT_INV_LEN)))
+             &&
+             (!(rtinfo->flags & IP6T_RT_TYP) ||
+              ((rtinfo->rt_type == rh->type) ^
+               !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
 
        if (ret && (rtinfo->flags & IP6T_RT_RES)) {
                u_int32_t *rp, _reserved;
                rp = skb_header_pointer(skb,
-                                       ptr + offsetof(struct rt0_hdr, reserved),
-                                       sizeof(_reserved), &_reserved);
+                                       ptr + offsetof(struct rt0_hdr,
+                                                      reserved),
+                                       sizeof(_reserved),
+                                       &_reserved);
 
                ret = (*rp == 0);
        }
 
-       DEBUGP("#%d ",rtinfo->addrnr);
-       if ( !(rtinfo->flags & IP6T_RT_FST) ){
-              return ret;
+       DEBUGP("#%d ", rtinfo->addrnr);
+       if (!(rtinfo->flags & IP6T_RT_FST)) {
+               return ret;
        } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
                DEBUGP("Not strict ");
-               if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){
+               if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
                        DEBUGP("There isn't enough space\n");
                        return 0;
                } else {
                        unsigned int i = 0;
 
-                       DEBUGP("#%d ",rtinfo->addrnr);
-                       for(temp=0; temp<(unsigned int)((hdrlen-8)/16); temp++){
+                       DEBUGP("#%d ", rtinfo->addrnr);
+                       for (temp = 0;
+                            temp < (unsigned int)((hdrlen - 8) / 16);
+                            temp++) {
                                ap = skb_header_pointer(skb,
                                                        ptr
                                                        + sizeof(struct rt0_hdr)
@@ -141,24 +147,26 @@ match(const struct sk_buff *skb,
                                BUG_ON(ap == NULL);
 
                                if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
-                                       DEBUGP("i=%d temp=%d;\n",i,temp);
+                                       DEBUGP("i=%d temp=%d;\n", i, temp);
                                        i++;
                                }
-                               if (i==rtinfo->addrnr) break;
+                               if (i == rtinfo->addrnr)
+                                       break;
                        }
                        DEBUGP("i=%d #%d\n", i, rtinfo->addrnr);
                        if (i == rtinfo->addrnr)
                                return ret;
-                       else return 0;
+                       else
+                               return 0;
                }
        } else {
                DEBUGP("Strict ");
-               if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){
+               if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
                        DEBUGP("There isn't enough space\n");
                        return 0;
                } else {
-                       DEBUGP("#%d ",rtinfo->addrnr);
-                       for(temp=0; temp<rtinfo->addrnr; temp++){
+                       DEBUGP("#%d ", rtinfo->addrnr);
+                       for (temp = 0; temp < rtinfo->addrnr; temp++) {
                                ap = skb_header_pointer(skb,
                                                        ptr
                                                        + sizeof(struct rt0_hdr)
@@ -171,9 +179,11 @@ match(const struct sk_buff *skb,
                                        break;
                        }
                        DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr);
-                       if ((temp == rtinfo->addrnr) && (temp == (unsigned int)((hdrlen-8)/16)))
+                       if ((temp == rtinfo->addrnr) &&
+                           (temp == (unsigned int)((hdrlen - 8) / 16)))
                                return ret;
-                       else return 0;
+                       else
+                               return 0;
                }
        }
 
@@ -183,32 +193,31 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *entry,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *entry,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_rt *rtinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
-              DEBUGP("ip6t_rt: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
-              return 0;
-       }
-       if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
-              DEBUGP("ip6t_rt: unknown flags %X\n",
-                      rtinfo->invflags);
-              return 0;
-       }
-       if ( (rtinfo->flags & (IP6T_RT_RES|IP6T_RT_FST_MASK)) && 
-                      (!(rtinfo->flags & IP6T_RT_TYP) || 
-                      (rtinfo->rt_type != 0) || 
-                      (rtinfo->invflags & IP6T_RT_INV_TYP)) ) {
-             DEBUGP("`--rt-type 0' required before `--rt-0-*'");
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_rt *rtinfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
+               DEBUGP("ip6t_rt: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
+               return 0;
+       }
+       if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
+               DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
+               return 0;
+       }
+       if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
+           (!(rtinfo->flags & IP6T_RT_TYP) ||
+            (rtinfo->rt_type != 0) ||
+            (rtinfo->invflags & IP6T_RT_INV_TYP))) {
+               DEBUGP("`--rt-type 0' required before `--rt-0-*'");
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match rt_match = {
@@ -220,12 +229,12 @@ static struct ip6t_match rt_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&rt_match);
+       return ip6t_register_match(&rt_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&rt_match);
+       ip6t_unregister_match(&rt_match);
 }
 
 module_init(init);
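
The rewritten match() above leans on a common netfilter idiom: each criterion is written as (condition) ^ !!(invflags & FLAG), so a set inversion bit flips the outcome of that one test without any extra branching. A minimal stand-alone sketch of the idiom follows; the names match_len and DEMO_INV_LEN are invented for illustration and are not part of this commit.

#include <stdio.h>

#define DEMO_INV_LEN 0x01   /* hypothetical "invert the length test" bit */

/* One criterion in the netfilter style: !! normalises the flag to 0 or 1,
 * so the XOR flips exactly this test and nothing else. */
static int match_len(unsigned int want, unsigned int got, unsigned int invflags)
{
        return (want == got) ^ !!(invflags & DEMO_INV_LEN);
}

int main(void)
{
        printf("%d\n", match_len(8, 8, 0));             /* 1: equal, not inverted */
        printf("%d\n", match_len(8, 8, DEMO_INV_LEN));  /* 0: equal, inverted */
        printf("%d\n", match_len(8, 16, DEMO_INV_LEN)); /* 1: unequal, inverted */
        return 0;
}
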
index 3ac81cdd1211ce2c58fa4729b65fabe464097294..3e7466900bd4e1260728ed63759d60794a88c69e 100644 (file)
@@ -81,7 +81,7 @@ static int krxtimod(void *arg)
 
        for (;;) {
                unsigned long jif;
-               signed long timeout;
+               long timeout;
 
                /* deal with the server being asked to die */
                if (krxtimod_die) {
index 3b5ecd8e2401f85c9d2f14a64ec33d3ef38db14e..29975d99d864de1a1d8ea5b2b4a91a719c8b0861 100644 (file)
@@ -361,7 +361,7 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
 {
        struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
-       signed long timeout;
+       long timeout;
 
        /* display header on line 1 */
        if (v == SEQ_START_TOKEN) {
@@ -373,8 +373,8 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
        /* display one peer per line on subsequent lines */
        timeout = 0;
        if (!list_empty(&peer->timeout.link))
-               timeout = (signed long) peer->timeout.timo_jif -
-                       (signed long) jiffies;
+               timeout = (long) peer->timeout.timo_jif -
+                       (long) jiffies;
 
        seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
                   peer->trans->port,
@@ -468,7 +468,7 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
 {
        struct rxrpc_connection *conn;
-       signed long timeout;
+       long timeout;
 
        conn = list_entry(v, struct rxrpc_connection, proc_link);
 
@@ -484,8 +484,8 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
        /* display one conn per line on subsequent lines */
        timeout = 0;
        if (!list_empty(&conn->timeout.link))
-               timeout = (signed long) conn->timeout.timo_jif -
-                       (signed long) jiffies;
+               timeout = (long) conn->timeout.timo_jif -
+                       (long) jiffies;
 
        seq_printf(m,
                   "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
index 5b3a3e48ed92e64d8d45f05fc32e820a5f701024..1641db33a994020f557cee3d42ab0cc5505a2743 100644 (file)
@@ -228,14 +228,13 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
        }
        sch_tree_unlock(sch);
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
-               int band = q->prio2band[i];
-               if (q->queues[band] == &noop_qdisc) {
+       for (i=0; i<q->bands; i++) {
+               if (q->queues[i] == &noop_qdisc) {
                        struct Qdisc *child;
                        child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
                        if (child) {
                                sch_tree_lock(sch);
-                               child = xchg(&q->queues[band], child);
+                               child = xchg(&q->queues[i], child);
 
                                if (child != &noop_qdisc)
                                        qdisc_destroy(child);
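
The sch_prio.c hunk changes which loop backfills the child qdiscs: walking the priority-to-band map can visit one band many times and skip any band that no priority maps to, whereas walking 0..q->bands-1 touches every configured band exactly once, so none is left holding the noop_qdisc placeholder. A toy illustration of the difference; the band count and map contents are made up for the example.

#include <stdio.h>

#define TC_PRIO_MAX 15

int main(void)
{
        /* Hypothetical 4-band qdisc whose map never points at band 3. */
        int bands = 4;
        int prio2band[TC_PRIO_MAX + 1] = { 0, 1, 1, 1, 2, 2, 0, 0,
                                           1, 1, 1, 1, 1, 1, 1, 1 };
        int seen[4] = { 0 };
        int i;

        for (i = 0; i <= TC_PRIO_MAX; i++)
                seen[prio2band[i]]++;

        for (i = 0; i < bands; i++)
                printf("band %d visited %d time(s) via the map\n", i, seen[i]);
        /* band 3 is visited 0 times, so a map-driven loop never replaces its
         * placeholder; iterating the bands directly cannot miss it. */
        return 0;
}
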
index 8734bb7280e36dadd1e1bc87aeff3b8401d85ceb..86d8da0cbd027262024277ea1ba7fe14164f7d4f 100644 (file)
@@ -144,6 +144,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
                    (iph->protocol == IPPROTO_TCP ||
                     iph->protocol == IPPROTO_UDP ||
+                    iph->protocol == IPPROTO_SCTP ||
+                    iph->protocol == IPPROTO_DCCP ||
                     iph->protocol == IPPROTO_ESP))
                        h2 ^= *(((u32*)iph) + iph->ihl);
                break;
@@ -155,6 +157,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
                if (iph->nexthdr == IPPROTO_TCP ||
                    iph->nexthdr == IPPROTO_UDP ||
+                   iph->nexthdr == IPPROTO_SCTP ||
+                   iph->nexthdr == IPPROTO_DCCP ||
                    iph->nexthdr == IPPROTO_ESP)
                        h2 ^= *(u32*)&iph[1];
                break;
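
The sch_sfq.c hunks widen the set of transport protocols whose port word feeds the flow hash: for TCP, UDP, and now SCTP and DCCP (plus ESP's SPI), the first 32-bit word past the network header carries the source/destination ports, so XOR-ing it into h2 separates flows that share the same address pair. A stand-alone sketch of that fold, with the field layout simplified rather than taken from the kernel structs:

#include <stdio.h>
#include <stdint.h>

/* Simplified flow-key fold: mix addresses, protocol, and the first 32-bit
 * word after the IP header (the port pair for TCP/UDP/SCTP/DCCP). */
static uint32_t fold_flow(uint32_t daddr, uint32_t saddr,
                          uint8_t protocol, uint32_t port_word)
{
        uint32_t h1 = daddr;
        uint32_t h2 = saddr ^ protocol;

        h2 ^= port_word;        /* distinguishes flows between the same hosts */
        return h1 ^ h2;
}

int main(void)
{
        /* Same host pair, different DCCP (protocol 33) port pairs
         * now land in different hash buckets. */
        printf("%08x\n", fold_flow(0x0a000001, 0x0a000002, 33, 0x1f900050));
        printf("%08x\n", fold_flow(0x0a000001, 0x0a000002, 33, 0x1f910050));
        return 0;
}
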
index e9086e95a31f13686a3db0cde9946f9a72091332..fd654399878854f22715cf1539a9fea4ec721178 100644 (file)
@@ -69,13 +69,14 @@ struct sbus_dma_info {
 };
 #endif
 
+struct snd_cs4231;
 struct cs4231_dma_control {
         void           (*prepare)(struct cs4231_dma_control *dma_cont, int dir);
         void           (*enable)(struct cs4231_dma_control *dma_cont, int on);
         int            (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len);
         unsigned int   (*address)(struct cs4231_dma_control *dma_cont);
         void           (*reset)(struct snd_cs4231 *chip); 
-        void           (*preallocate)(struct snd_cs4231 *chip, struct snd_snd_pcm *pcm); 
+        void           (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); 
 #ifdef EBUS_SUPPORT
        struct          ebus_dma_info   ebus_info;
 #endif
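
The cs4231.c hunk adds a forward declaration of struct snd_cs4231 before cs4231_dma_control refers to it in function-pointer prototypes, and fixes the misspelled struct snd_snd_pcm parameter type. A forward declaration is sufficient whenever the struct only appears behind a pointer; a minimal illustration follows, with the widget types invented purely for the example.

#include <stdio.h>

struct widget;                       /* forward declaration: size unknown here */

struct widget_ops {
        void (*reset)(struct widget *w);   /* pointer use only, so this compiles */
};

struct widget {                      /* full definition can come later */
        int state;
};

static void do_reset(struct widget *w)
{
        w->state = 0;
}

int main(void)
{
        struct widget w = { .state = 7 };
        struct widget_ops ops = { .reset = do_reset };

        ops.reset(&w);
        printf("%d\n", w.state);     /* prints 0 */
        return 0;
}
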