www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
authorDavid S. Miller <davem@davemloft.net>
Wed, 8 Oct 2008 21:56:41 +0000 (14:56 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 8 Oct 2008 21:56:41 +0000 (14:56 -0700)
Conflicts:

drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c

1  2 
MAINTAINERS
drivers/net/e1000e/e1000.h
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
drivers/net/e1000e/param.c
net/core/dev.c
net/core/rtnetlink.c
net/ipv4/tcp_input.c

diff --combined MAINTAINERS
index e6aa6aa789f54ac1b5d626297851502b49f3baec,8dae4555f10e1b91f0c2eafb84f04fbf71812e55..e6e481483622246650f7450f3270a11661ee161e
@@@ -1048,13 -1048,6 +1048,13 @@@ L:    cbe-oss-dev@ozlabs.or
  W:    http://www.ibm.com/developerworks/power/cell/
  S:    Supported
  
 +CISCO 10G ETHERNET DRIVER
 +P:    Scott Feldman
 +M:    scofeldm@cisco.com
 +P:    Joe Eykholt
 +M:    jeykholt@cisco.com
 +S:    Supported
 +
  CFAG12864B LCD DRIVER
  P:    Miguel Ojeda Sandonis
  M:    miguel.ojeda.sandonis@gmail.com
@@@ -1205,9 -1198,7 +1205,7 @@@ M:      hpa@zytor.co
  S:    Maintained
  
  CPUSETS
- P:    Paul Jackson
  P:    Paul Menage
- M:    pj@sgi.com
  M:    menage@google.com
  L:    linux-kernel@vger.kernel.org
  W:    http://www.bullopensource.org/cpuset/
@@@ -2328,12 -2319,6 +2326,12 @@@ L:    video4linux-list@redhat.co
  W:    http://www.ivtvdriver.org
  S:    Maintained
  
 +JME NETWORK DRIVER
 +P:    Guo-Fu Tseng
 +M:    cooldavid@cooldavid.org
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +
  JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
  P:    David Woodhouse
  M:    dwmw2@infradead.org
@@@ -2719,6 -2704,7 +2717,7 @@@ MAN-PAGES: MANUAL PAGES FOR LINUX -- Se
  P:    Michael Kerrisk
  M:    mtk.manpages@gmail.com
  W:    http://www.kernel.org/doc/man-pages
+ L:    linux-man@vger.kernel.org
  S:    Supported
  
  MARVELL LIBERTAS WIRELESS DRIVER
@@@ -3398,13 -3384,6 +3397,13 @@@ M:    linux-driver@qlogic.co
  L:    netdev@vger.kernel.org
  S:    Supported
  
 +QLOGIC QLGE 10Gb ETHERNET DRIVER
 +P:    Ron Mercer
 +M:    linux-driver@qlogic.com
 +M:    ron.mercer@qlogic.com
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +
  QNX4 FILESYSTEM
  P:    Anders Larsen
  M:    al@alarsen.net
index 0a1916b0419d0d60d154beea4a46d62c1c51c31d,5ea6b60fa3774a5ed6673949bd8e7ea0ee780170..c55de1c027af9578687a553195bfb911c65a72f9
@@@ -62,11 -62,6 +62,11 @@@ struct e1000_info
        e_printk(KERN_NOTICE, adapter, format, ## arg)
  
  
 +/* Interrupt modes, as used by the IntMode parameter */
 +#define E1000E_INT_MODE_LEGACY                0
 +#define E1000E_INT_MODE_MSI           1
 +#define E1000E_INT_MODE_MSIX          2
 +
  /* Tx/Rx descriptor defines */
  #define E1000_DEFAULT_TXD             256
  #define E1000_MAX_TXD                 4096
@@@ -100,11 -95,9 +100,11 @@@ enum e1000_boards 
        board_82571,
        board_82572,
        board_82573,
 +      board_82574,
        board_80003es2lan,
        board_ich8lan,
        board_ich9lan,
 +      board_ich10lan,
  };
  
  struct e1000_queue_stats {
@@@ -153,12 -146,6 +153,12 @@@ struct e1000_ring 
        /* array of buffer information structs */
        struct e1000_buffer *buffer_info;
  
 +      char name[IFNAMSIZ + 5];
 +      u32 ims_val;
 +      u32 itr_val;
 +      u16 itr_register;
 +      int set_itr;
 +
        struct sk_buff *rx_skb_top;
  
        struct e1000_queue_stats stats;
@@@ -270,7 -257,6 +270,6 @@@ struct e1000_adapter 
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct net_device_stats net_stats;
-       spinlock_t stats_lock;      /* prevent concurrent stats updates */
  
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
        u32 test_icr;
  
        u32 msg_enable;
 +      struct msix_entry *msix_entries;
 +      int int_mode;
 +      u32 eiac_mask;
  
        u32 eeprom_wol;
        u32 wol;
        unsigned long led_status;
  
        unsigned int flags;
+       struct work_struct downshift_task;
+       struct work_struct update_phy_task;
  };
  
  struct e1000_info {
  #define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
  #define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
  #define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
+ #define FLAG_READ_ONLY_NVM                (1 << 8)
  #define FLAG_IS_ICH                       (1 << 9)
 +#define FLAG_HAS_MSIX                     (1 << 10)
  #define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
  #define FLAG_IS_QUAD_PORT_A               (1 << 12)
  #define FLAG_IS_QUAD_PORT                 (1 << 13)
@@@ -381,8 -366,6 +383,8 @@@ extern int e1000e_setup_tx_resources(st
  extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
  extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
  extern void e1000e_update_stats(struct e1000_adapter *adapter);
 +extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 +extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
  
  extern unsigned int copybreak;
  
@@@ -391,10 -374,8 +393,10 @@@ extern char *e1000e_get_hw_dev_name(str
  extern struct e1000_info e1000_82571_info;
  extern struct e1000_info e1000_82572_info;
  extern struct e1000_info e1000_82573_info;
 +extern struct e1000_info e1000_82574_info;
  extern struct e1000_info e1000_ich8_info;
  extern struct e1000_info e1000_ich9_info;
 +extern struct e1000_info e1000_ich10_info;
  extern struct e1000_info e1000_es2_info;
  
  extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@@ -406,6 -387,7 +408,7 @@@ extern bool e1000e_enable_mng_pass_thru
  extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
  extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
  
+ extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
  extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
                                                 bool state);
  extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
@@@ -467,13 -449,10 +470,13 @@@ extern s32 e1000e_get_cable_length_m88(
  extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
  extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
  extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
 +extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
  extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
  extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
  extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
  extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
 +extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 +extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
  extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
  extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
  extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
@@@ -544,12 -523,7 +547,12 @@@ static inline s32 e1000_get_phy_info(st
        return hw->phy.ops.get_phy_info(hw);
  }
  
 -extern bool e1000e_check_mng_mode(struct e1000_hw *hw);
 +static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
 +{
 +      return hw->mac.ops.check_mng_mode(hw);
 +}
 +
 +extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
  extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
  extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
  
index 52b762eb17458985c670705507667c1958db8816,33a3ff17b5d07d80556dd394efed6d692f7faf05..70c11c811a08e3534671ac6554e50767b1ae63be
@@@ -432,6 -432,10 +432,10 @@@ static void e1000_get_regs(struct net_d
        regs_buff[11] = er32(TIDV);
  
        regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
+       /* ethtool doesn't use anything past this point, so all this
+        * code is likely legacy junk for apps that may or may not
+        * exist */
        if (hw->phy.type == e1000_phy_m88) {
                e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
                regs_buff[13] = (u32)phy_data; /* cable length */
                regs_buff[22] = adapter->phy_stats.receive_errors;
                regs_buff[23] = regs_buff[13]; /* mdix mode */
        }
-       regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+       regs_buff[21] = 0; /* was idle_errors */
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
        regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
        regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
@@@ -529,6 -533,9 +533,9 @@@ static int e1000_set_eeprom(struct net_
        if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
                return -EFAULT;
  
+       if (adapter->flags & FLAG_READ_ONLY_NVM)
+               return -EINVAL;
        max_len = hw->nvm.word_size * 2;
  
        first_word = eeprom->offset >> 1;
         * and flush shadow RAM for 82573 controllers
         */
        if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
 +                             (hw->mac.type == e1000_82574) ||
                               (hw->mac.type == e1000_82573)))
                e1000e_update_nvm_checksum(hw);
  
@@@ -780,10 -786,8 +787,10 @@@ static int e1000_reg_test(struct e1000_
                toggle = 0x7FFFF3FF;
                break;
        case e1000_82573:
 +      case e1000_82574:
        case e1000_ich8lan:
        case e1000_ich9lan:
 +      case e1000_ich10lan:
                toggle = 0x7FFFF033;
                break;
        default:
        REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
        for (i = 0; i < mac->rar_entry_count; i++)
                REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
 -                                     0x8003FFFF, 0xFFFFFFFF);
 +                                     ((mac->type == e1000_ich10lan) ?
 +                                         0x8007FFFF : 0x8003FFFF),
 +                                     0xFFFFFFFF);
  
        for (i = 0; i < mac->mta_reg_count; i++)
                REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
@@@ -889,18 -891,10 +896,18 @@@ static int e1000_intr_test(struct e1000
        u32 shared_int = 1;
        u32 irq = adapter->pdev->irq;
        int i;
 +      int ret_val = 0;
 +      int int_mode = E1000E_INT_MODE_LEGACY;
  
        *data = 0;
  
 -      /* NOTE: we don't test MSI interrupts here, yet */
 +      /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
 +      if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
 +              int_mode = adapter->int_mode;
 +              e1000e_reset_interrupt_capability(adapter);
 +              adapter->int_mode = E1000E_INT_MODE_LEGACY;
 +              e1000e_set_interrupt_capability(adapter);
 +      }
        /* Hook up test interrupt handler just for this test */
        if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
                         netdev)) {
        } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
                 netdev->name, netdev)) {
                *data = 1;
 -              return -1;
 +              ret_val = -1;
 +              goto out;
        }
        e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
  
  
        /* Test each interrupt */
        for (i = 0; i < 10; i++) {
 -              if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
 -                      continue;
 -
                /* Interrupt to test */
                mask = 1 << i;
  
 +              if (adapter->flags & FLAG_IS_ICH) {
 +                      switch (mask) {
 +                      case E1000_ICR_RXSEQ:
 +                              continue;
 +                      case 0x00000100:
 +                              if (adapter->hw.mac.type == e1000_ich8lan ||
 +                                  adapter->hw.mac.type == e1000_ich9lan)
 +                                      continue;
 +                              break;
 +                      default:
 +                              break;
 +                      }
 +              }
 +
                if (!shared_int) {
                        /*
                         * Disable the interrupt to be reported in
        /* Unhook test interrupt handler */
        free_irq(irq, netdev);
  
 -      return *data;
 +out:
 +      if (int_mode == E1000E_INT_MODE_MSIX) {
 +              e1000e_reset_interrupt_capability(adapter);
 +              adapter->int_mode = int_mode;
 +              e1000e_set_interrupt_capability(adapter);
 +      }
 +
 +      return ret_val;
  }
  
  static void e1000_free_desc_rings(struct e1000_adapter *adapter)
@@@ -1787,13 -1762,11 +1794,13 @@@ static void e1000_led_blink_callback(un
  static int e1000_phys_id(struct net_device *netdev, u32 data)
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_hw *hw = &adapter->hw;
  
        if (!data)
                data = INT_MAX;
  
 -      if (adapter->hw.phy.type == e1000_phy_ife) {
 +      if ((hw->phy.type == e1000_phy_ife) ||
 +          (hw->mac.type == e1000_82574)) {
                if (!adapter->blink_timer.function) {
                        init_timer(&adapter->blink_timer);
                        adapter->blink_timer.function =
                mod_timer(&adapter->blink_timer, jiffies);
                msleep_interruptible(data * 1000);
                del_timer_sync(&adapter->blink_timer);
 -              e1e_wphy(&adapter->hw,
 -                                  IFE_PHY_SPECIAL_CONTROL_LED, 0);
 +              if (hw->phy.type == e1000_phy_ife)
 +                      e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
        } else {
 -              e1000e_blink_led(&adapter->hw);
 +              e1000e_blink_led(hw);
                msleep_interruptible(data * 1000);
        }
  
 -      adapter->hw.mac.ops.led_off(&adapter->hw);
 +      hw->mac.ops.led_off(hw);
        clear_bit(E1000_LED_ON, &adapter->led_status);
 -      adapter->hw.mac.ops.cleanup_led(&adapter->hw);
 +      hw->mac.ops.cleanup_led(hw);
  
        return 0;
  }
index 692251b60915999e79654d981401e3e2b69b2ffb,bcd2bc477af29110636abec9920b3f4a76a244b7..523b9716a543aec21d923b58a07cd5162b3edab3
@@@ -43,9 -43,7 +43,9 @@@
   * 82567LM-2 Gigabit Network Connection
   * 82567LF-2 Gigabit Network Connection
   * 82567V-2 Gigabit Network Connection
 - * 82562GT-3 10/100 Network Connection
 + * 82567LF-3 Gigabit Network Connection
 + * 82567LM-3 Gigabit Network Connection
 + * 82567LM-4 Gigabit Network Connection
   */
  
  #include <linux/netdevice.h>
@@@ -60,6 -58,7 +60,7 @@@
  #define ICH_FLASH_HSFCTL              0x0006
  #define ICH_FLASH_FADDR                       0x0008
  #define ICH_FLASH_FDATA0              0x0010
+ #define ICH_FLASH_PR0                 0x0074
  
  #define ICH_FLASH_READ_COMMAND_TIMEOUT        500
  #define ICH_FLASH_WRITE_COMMAND_TIMEOUT       500
@@@ -152,6 -151,19 +153,19 @@@ union ich8_hws_flash_regacc 
        u16 regval;
  };
  
+ /* ICH Flash Protected Region */
+ union ich8_flash_protected_range {
+       struct ich8_pr {
+               u32 base:13;     /* 0:12 Protected Range Base */
+               u32 reserved1:2; /* 13:14 Reserved */
+               u32 rpe:1;       /* 15 Read Protection Enable */
+               u32 limit:13;    /* 16:28 Protected Range Limit */
+               u32 reserved2:2; /* 29:30 Reserved */
+               u32 wpe:1;       /* 31 Write Protection Enable */
+       } range;
+       u32 regval;
+ };
  static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
  static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
  static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
@@@ -159,15 -171,12 +173,15 @@@ static s32 e1000_check_polarity_ife_ich
  static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
  static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                u32 offset, u8 byte);
 +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 +                                       u8 *data);
  static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u16 *data);
  static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u8 size, u16 *data);
  static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
  static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
  
  static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
  {
@@@ -371,6 -380,9 +385,9 @@@ static s32 e1000_get_variants_ich8lan(s
        return 0;
  }
  
+ static DEFINE_MUTEX(nvm_mutex);
+ static pid_t nvm_owner = -1;
  /**
   *  e1000_acquire_swflag_ich8lan - Acquire software control flag
   *  @hw: pointer to the HW structure
@@@ -384,6 -396,15 +401,15 @@@ static s32 e1000_acquire_swflag_ich8lan
        u32 extcnf_ctrl;
        u32 timeout = PHY_CFG_TIMEOUT;
  
+       might_sleep();
+       if (!mutex_trylock(&nvm_mutex)) {
+               WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
+                    nvm_owner);
+               mutex_lock(&nvm_mutex);
+       }
+       nvm_owner = current->pid;
        while (timeout) {
                extcnf_ctrl = er32(EXTCNF_CTRL);
                extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
  
        if (!timeout) {
                hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
 +              extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 +              ew32(EXTCNF_CTRL, extcnf_ctrl);
+               nvm_owner = -1;
+               mutex_unlock(&nvm_mutex);
                return -E1000_ERR_CONFIG;
        }
  
@@@ -421,24 -442,11 +449,27 @@@ static void e1000_release_swflag_ich8la
        extcnf_ctrl = er32(EXTCNF_CTRL);
        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
        ew32(EXTCNF_CTRL, extcnf_ctrl);
+       nvm_owner = -1;
+       mutex_unlock(&nvm_mutex);
  }
  
 +/**
 + *  e1000_check_mng_mode_ich8lan - Checks management mode
 + *  @hw: pointer to the HW structure
 + *
 + *  This checks if the adapter has manageability enabled.
 + *  This is a function pointer entry point only called by read/write
 + *  routines for the PHY and NVM parts.
 + **/
 +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
 +{
 +      u32 fwsm = er32(FWSM);
 +
 +      return (fwsm & E1000_FWSM_MODE_MASK) ==
 +              (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
 +}
 +
  /**
   *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
   *  @hw: pointer to the HW structure
@@@ -919,56 -927,6 +950,56 @@@ static s32 e1000_set_d3_lplu_state_ich8
        return 0;
  }
  
 +/**
 + *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 + *  @hw: pointer to the HW structure
 + *  @bank:  pointer to the variable that returns the active bank
 + *
 + *  Reads signature byte from the NVM using the flash access registers.
 + **/
 +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 +{
 +      struct e1000_nvm_info *nvm = &hw->nvm;
 +      /* flash bank size is in words */
 +      u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
 +      u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
 +      u8 bank_high_byte = 0;
 +
 +      if (hw->mac.type != e1000_ich10lan) {
 +              if (er32(EECD) & E1000_EECD_SEC1VAL)
 +                      *bank = 1;
 +              else
 +                      *bank = 0;
 +      } else {
 +              /*
 +               * Make sure the signature for bank 0 is valid,
 +               * if not check for bank1
 +               */
 +              e1000_read_flash_byte_ich8lan(hw, act_offset, &bank_high_byte);
 +              if ((bank_high_byte & 0xC0) == 0x80) {
 +                      *bank = 0;
 +              } else {
 +                      /*
 +                       * find if segment 1 is valid by verifying
 +                       * bit 15:14 = 10b in word 0x13
 +                       */
 +                      e1000_read_flash_byte_ich8lan(hw,
 +                                                    act_offset + bank1_offset,
 +                                                    &bank_high_byte);
 +
 +                      /* bank1 has a valid signature equivalent to SEC1V */
 +                      if ((bank_high_byte & 0xC0) == 0x80) {
 +                              *bank = 1;
 +                      } else {
 +                              hw_dbg(hw, "ERROR: EEPROM not present\n");
 +                              return -E1000_ERR_NVM;
 +                      }
 +              }
 +      }
 +
 +      return 0;
 +}
 +
  /**
   *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
   *  @hw: pointer to the HW structure
@@@ -985,7 -943,6 +1016,7 @@@ static s32 e1000_read_nvm_ich8lan(struc
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 act_offset;
        s32 ret_val;
 +      u32 bank = 0;
        u16 i, word;
  
        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
        if (ret_val)
                return ret_val;
  
 -      /* Start with the bank offset, then add the relative offset. */
 -      act_offset = (er32(EECD) & E1000_EECD_SEC1VAL)
 -                   ? nvm->flash_bank_size
 -                   : 0;
 +      ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 +      if (ret_val)
 +              return ret_val;
 +
 +      act_offset = (bank) ? nvm->flash_bank_size : 0;
        act_offset += offset;
  
        for (i = 0; i < words; i++) {
@@@ -1149,29 -1105,6 +1180,29 @@@ static s32 e1000_read_flash_word_ich8la
        return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
  }
  
 +/**
 + *  e1000_read_flash_byte_ich8lan - Read byte from flash
 + *  @hw: pointer to the HW structure
 + *  @offset: The offset of the byte to read.
 + *  @data: Pointer to a byte to store the value read.
 + *
 + *  Reads a single byte from the NVM using the flash access registers.
 + **/
 +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 +                                       u8 *data)
 +{
 +      s32 ret_val;
 +      u16 word = 0;
 +
 +      ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
 +      if (ret_val)
 +              return ret_val;
 +
 +      *data = (u8)word;
 +
 +      return 0;
 +}
 +
  /**
   *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
   *  @hw: pointer to the HW structure
@@@ -1303,7 -1236,7 +1334,7 @@@ static s32 e1000_update_nvm_checksum_ic
  {
        struct e1000_nvm_info *nvm = &hw->nvm;
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 -      u32 i, act_offset, new_bank_offset, old_bank_offset;
 +      u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
        s32 ret_val;
        u16 data;
  
         * write to bank 0 etc.  We also need to erase the segment that
         * is going to be written
         */
 -      if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
 +      ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 +      if (ret_val)
 +              return ret_val;
 +
 +      if (bank == 0) {
                new_bank_offset = nvm->flash_bank_size;
                old_bank_offset = 0;
                e1000_erase_flash_bank_ich8lan(hw, 1);
         * programming failed.
         */
        if (ret_val) {
+               /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
                hw_dbg(hw, "Flash commit failed.\n");
                e1000_release_swflag_ich8lan(hw);
                return ret_val;
@@@ -1475,6 -1405,49 +1507,49 @@@ static s32 e1000_validate_nvm_checksum_
        return e1000e_validate_nvm_checksum_generic(hw);
  }
  
+ /**
+  *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+  *  @hw: pointer to the HW structure
+  *
+  *  To prevent malicious write/erase of the NVM, set it to be read-only
+  *  so that the hardware ignores all write/erase cycles of the NVM via
+  *  the flash control registers.  The shadow-ram copy of the NVM will
+  *  still be updated, however any updates to this copy will not stick
+  *  across driver reloads.
+  **/
+ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+ {
+       union ich8_flash_protected_range pr0;
+       union ich8_hws_flash_status hsfsts;
+       u32 gfpreg;
+       s32 ret_val;
+       ret_val = e1000_acquire_swflag_ich8lan(hw);
+       if (ret_val)
+               return;
+       gfpreg = er32flash(ICH_FLASH_GFPREG);
+       /* Write-protect GbE Sector of NVM */
+       pr0.regval = er32flash(ICH_FLASH_PR0);
+       pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+       pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+       pr0.range.wpe = true;
+       ew32flash(ICH_FLASH_PR0, pr0.regval);
+       /*
+        * Lock down a subset of GbE Flash Control Registers, e.g.
+        * PR0 to prevent the write-protection from being lifted.
+        * Once FLOCKDN is set, the registers protected by it cannot
+        * be written until FLOCKDN is cleared by a hardware reset.
+        */
+       hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+       hsfsts.hsf_status.flockdn = true;
+       ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+       e1000_release_swflag_ich8lan(hw);
+ }
  /**
   *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
   *  @hw: pointer to the HW structure
@@@ -1822,6 -1795,9 +1897,9 @@@ static s32 e1000_reset_hw_ich8lan(struc
        ew32(CTRL, (ctrl | E1000_CTRL_RST));
        msleep(20);
  
+       /* release the swflag because it is not reset by hardware reset */
+       e1000_release_swflag_ich8lan(hw);
        ret_val = e1000e_get_auto_rd_done(hw);
        if (ret_val) {
                /*
@@@ -2291,14 -2267,13 +2369,14 @@@ void e1000e_gig_downshift_workaround_ic
   *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
   *  to a lower speed.
   *
 - *  Should only be called for ICH9 devices.
 + *  Should only be called for ICH9 and ICH10 devices.
   **/
  void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
  {
        u32 phy_ctrl;
  
 -      if (hw->mac.type == e1000_ich9lan) {
 +      if ((hw->mac.type == e1000_ich10lan) ||
 +          (hw->mac.type == e1000_ich9lan)) {
                phy_ctrl = er32(PHY_CTRL);
                phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
                            E1000_PHY_CTRL_GBE_DISABLE;
@@@ -2355,39 -2330,6 +2433,39 @@@ static s32 e1000_led_off_ich8lan(struc
        return 0;
  }
  
 +/**
 + *  e1000_get_cfg_done_ich8lan - Read config done bit
 + *  @hw: pointer to the HW structure
 + *
 + *  Read the management control register for the config done bit for
 + *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 + *  to read the config done bit, so an error is *ONLY* logged and returns
 + *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
 + *  would not be able to be reset or change link.
 + **/
 +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
 +{
 +      u32 bank = 0;
 +
 +      e1000e_get_cfg_done(hw);
 +
 +      /* If EEPROM is not marked present, init the IGP 3 PHY manually */
 +      if (hw->mac.type != e1000_ich10lan) {
 +              if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
 +                  (hw->phy.type == e1000_phy_igp_3)) {
 +                      e1000e_phy_init_script_igp3(hw);
 +              }
 +      } else {
 +              if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
 +                      /* Maybe we should do a basic PHY config */
 +                      hw_dbg(hw, "EEPROM not present\n");
 +                      return -E1000_ERR_CONFIG;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
  /**
   *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
   *  @hw: pointer to the HW structure
@@@ -2418,7 -2360,7 +2496,7 @@@ static void e1000_clear_hw_cntrs_ich8la
  }
  
  static struct e1000_mac_operations ich8_mac_ops = {
 -      .mng_mode_enab          = E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
 +      .check_mng_mode         = e1000_check_mng_mode_ich8lan,
        .check_for_link         = e1000e_check_for_copper_link,
        .cleanup_led            = e1000_cleanup_led_ich8lan,
        .clear_hw_cntrs         = e1000_clear_hw_cntrs_ich8lan,
@@@ -2438,7 -2380,7 +2516,7 @@@ static struct e1000_phy_operations ich8
        .check_reset_block      = e1000_check_reset_block_ich8lan,
        .commit_phy             = NULL,
        .force_speed_duplex     = e1000_phy_force_speed_duplex_ich8lan,
 -      .get_cfg_done           = e1000e_get_cfg_done,
 +      .get_cfg_done           = e1000_get_cfg_done_ich8lan,
        .get_cable_length       = e1000e_get_cable_length_igp_2,
        .get_phy_info           = e1000_get_phy_info_ich8lan,
        .read_phy_reg           = e1000e_read_phy_reg_igp,
@@@ -2493,20 -2435,3 +2571,20 @@@ struct e1000_info e1000_ich9_info = 
        .nvm_ops                = &ich8_nvm_ops,
  };
  
 +struct e1000_info e1000_ich10_info = {
 +      .mac                    = e1000_ich10lan,
 +      .flags                  = FLAG_HAS_JUMBO_FRAMES
 +                                | FLAG_IS_ICH
 +                                | FLAG_HAS_WOL
 +                                | FLAG_RX_CSUM_ENABLED
 +                                | FLAG_HAS_CTRLEXT_ON_LOAD
 +                                | FLAG_HAS_AMT
 +                                | FLAG_HAS_ERT
 +                                | FLAG_HAS_FLASH
 +                                | FLAG_APME_IN_WUC,
 +      .pba                    = 10,
 +      .get_variants           = e1000_get_variants_ich8lan,
 +      .mac_ops                = &ich8_mac_ops,
 +      .phy_ops                = &ich8_phy_ops,
 +      .nvm_ops                = &ich8_nvm_ops,
 +};
index 24d05cb700553fbefc306e0b99015c53dd4018f6,b81c4237b5d30a11dc7f6e3dcefdbbc3ae202fb2..1b72749979c4aa221d3b99c00bb93eb096f4f87d
@@@ -47,7 -47,7 +47,7 @@@
  
  #include "e1000.h"
  
- #define DRV_VERSION "0.3.3.3-k2"
+ #define DRV_VERSION "0.3.3.3-k6"
  char e1000e_driver_name[] = "e1000e";
  const char e1000e_driver_version[] = DRV_VERSION;
  
@@@ -55,11 -55,9 +55,11 @@@ static const struct e1000_info *e1000_i
        [board_82571]           = &e1000_82571_info,
        [board_82572]           = &e1000_82572_info,
        [board_82573]           = &e1000_82573_info,
 +      [board_82574]           = &e1000_82574_info,
        [board_80003es2lan]     = &e1000_es2_info,
        [board_ich8lan]         = &e1000_ich8_info,
        [board_ich9lan]         = &e1000_ich9_info,
 +      [board_ich10lan]        = &e1000_ich10_info,
  };
  
  #ifdef DEBUG
@@@ -1117,6 -1115,14 +1117,14 @@@ static void e1000_clean_rx_ring(struct 
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
  }
  
+ static void e1000e_downshift_workaround(struct work_struct *work)
+ {
+       struct e1000_adapter *adapter = container_of(work,
+                                       struct e1000_adapter, downshift_task);
+       e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+ }
  /**
   * e1000_intr_msi - Interrupt Handler
   * @irq: interrupt number
@@@ -1141,7 -1147,7 +1149,7 @@@ static irqreturn_t e1000_intr_msi(int i
                 */
                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
                    (!(er32(STATUS) & E1000_STATUS_LU)))
-                       e1000e_gig_downshift_workaround_ich8lan(hw);
+                       schedule_work(&adapter->downshift_task);
  
                /*
                 * 80003ES2LAN workaround-- For packet buffer work-around on
@@@ -1181,8 -1187,8 +1189,8 @@@ static irqreturn_t e1000_intr(int irq, 
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 -
        u32 rctl, icr = er32(ICR);
 +
        if (!icr)
                return IRQ_NONE;  /* Not our interrupt */
  
                 */
                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
                    (!(er32(STATUS) & E1000_STATUS_LU)))
-                       e1000e_gig_downshift_workaround_ich8lan(hw);
+                       schedule_work(&adapter->downshift_task);
  
                /*
                 * 80003ES2LAN workaround--
        return IRQ_HANDLED;
  }
  
 +static irqreturn_t e1000_msix_other(int irq, void *data)
 +{
 +      struct net_device *netdev = data;
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_hw *hw = &adapter->hw;
 +      u32 icr = er32(ICR);
 +
 +      if (!(icr & E1000_ICR_INT_ASSERTED)) {
 +              ew32(IMS, E1000_IMS_OTHER);
 +              return IRQ_NONE;
 +      }
 +
 +      if (icr & adapter->eiac_mask)
 +              ew32(ICS, (icr & adapter->eiac_mask));
 +
 +      if (icr & E1000_ICR_OTHER) {
 +              if (!(icr & E1000_ICR_LSC))
 +                      goto no_link_interrupt;
 +              hw->mac.get_link_status = 1;
 +              /* guard against interrupt when we're going down */
 +              if (!test_bit(__E1000_DOWN, &adapter->state))
 +                      mod_timer(&adapter->watchdog_timer, jiffies + 1);
 +      }
 +
 +no_link_interrupt:
 +      ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +
 +static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
 +{
 +      struct net_device *netdev = data;
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_hw *hw = &adapter->hw;
 +      struct e1000_ring *tx_ring = adapter->tx_ring;
 +
 +
 +      adapter->total_tx_bytes = 0;
 +      adapter->total_tx_packets = 0;
 +
 +      if (!e1000_clean_tx_irq(adapter))
 +              /* Ring was not completely cleaned, so fire another interrupt */
 +              ew32(ICS, tx_ring->ims_val);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
 +{
 +      struct net_device *netdev = data;
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      /* Write the ITR value calculated at the end of the
 +       * previous interrupt.
 +       */
 +      if (adapter->rx_ring->set_itr) {
 +              writel(1000000000 / (adapter->rx_ring->itr_val * 256),
 +                     adapter->hw.hw_addr + adapter->rx_ring->itr_register);
 +              adapter->rx_ring->set_itr = 0;
 +      }
 +
 +      if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
 +              adapter->total_rx_bytes = 0;
 +              adapter->total_rx_packets = 0;
 +              __netif_rx_schedule(netdev, &adapter->napi);
 +      }
 +      return IRQ_HANDLED;
 +}
 +
 +/**
 + * e1000_configure_msix - Configure MSI-X hardware
 + *
 + * e1000_configure_msix sets up the hardware to properly
 + * generate MSI-X interrupts.
 + **/
 +static void e1000_configure_msix(struct e1000_adapter *adapter)
 +{
 +      struct e1000_hw *hw = &adapter->hw;
 +      struct e1000_ring *rx_ring = adapter->rx_ring;
 +      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      int vector = 0;
 +      u32 ctrl_ext, ivar = 0;
 +
 +      adapter->eiac_mask = 0;
 +
 +      /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
 +      if (hw->mac.type == e1000_82574) {
 +              u32 rfctl = er32(RFCTL);
 +              rfctl |= E1000_RFCTL_ACK_DIS;
 +              ew32(RFCTL, rfctl);
 +      }
 +
 +#define E1000_IVAR_INT_ALLOC_VALID    0x8
 +      /* Configure Rx vector */
 +      rx_ring->ims_val = E1000_IMS_RXQ0;
 +      adapter->eiac_mask |= rx_ring->ims_val;
 +      if (rx_ring->itr_val)
 +              writel(1000000000 / (rx_ring->itr_val * 256),
 +                     hw->hw_addr + rx_ring->itr_register);
 +      else
 +              writel(1, hw->hw_addr + rx_ring->itr_register);
 +      ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
 +
 +      /* Configure Tx vector */
 +      tx_ring->ims_val = E1000_IMS_TXQ0;
 +      vector++;
 +      if (tx_ring->itr_val)
 +              writel(1000000000 / (tx_ring->itr_val * 256),
 +                     hw->hw_addr + tx_ring->itr_register);
 +      else
 +              writel(1, hw->hw_addr + tx_ring->itr_register);
 +      adapter->eiac_mask |= tx_ring->ims_val;
 +      ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
 +
 +      /* set vector for Other Causes, e.g. link changes */
 +      vector++;
 +      ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
 +      if (rx_ring->itr_val)
 +              writel(1000000000 / (rx_ring->itr_val * 256),
 +                     hw->hw_addr + E1000_EITR_82574(vector));
 +      else
 +              writel(1, hw->hw_addr + E1000_EITR_82574(vector));
 +
 +      /* Cause Tx interrupts on every write back */
 +      ivar |= (1 << 31);
 +
 +      ew32(IVAR, ivar);
 +
 +      /* enable MSI-X PBA support */
 +      ctrl_ext = er32(CTRL_EXT);
 +      ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
 +
 +      /* Auto-Mask Other interrupts upon ICR read */
 +#define E1000_EIAC_MASK_82574   0x01F00000
 +      ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
 +      ctrl_ext |= E1000_CTRL_EXT_EIAME;
 +      ew32(CTRL_EXT, ctrl_ext);
 +      e1e_flush();
 +}
 +
 +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
 +{
 +      if (adapter->msix_entries) {
 +              pci_disable_msix(adapter->pdev);
 +              kfree(adapter->msix_entries);
 +              adapter->msix_entries = NULL;
 +      } else if (adapter->flags & FLAG_MSI_ENABLED) {
 +              pci_disable_msi(adapter->pdev);
 +              adapter->flags &= ~FLAG_MSI_ENABLED;
 +      }
 +
 +      return;
 +}
 +
 +/**
 + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 + *
 + * Attempt to configure interrupts using the best available
 + * capabilities of the hardware and kernel.
 + **/
 +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 +{
 +      int err;
 +      int numvecs, i;
 +
 +
 +      switch (adapter->int_mode) {
 +      case E1000E_INT_MODE_MSIX:
 +              if (adapter->flags & FLAG_HAS_MSIX) {
 +                      numvecs = 3; /* RxQ0, TxQ0 and other */
 +                      adapter->msix_entries = kcalloc(numvecs,
 +                                                    sizeof(struct msix_entry),
 +                                                    GFP_KERNEL);
 +                      if (adapter->msix_entries) {
 +                              for (i = 0; i < numvecs; i++)
 +                                      adapter->msix_entries[i].entry = i;
 +
 +                              err = pci_enable_msix(adapter->pdev,
 +                                                    adapter->msix_entries,
 +                                                    numvecs);
 +                              if (err == 0)
 +                                      return;
 +                      }
 +                      /* MSI-X failed, so fall through and try MSI */
 +                      e_err("Failed to initialize MSI-X interrupts.  "
 +                            "Falling back to MSI interrupts.\n");
 +                      e1000e_reset_interrupt_capability(adapter);
 +              }
 +              adapter->int_mode = E1000E_INT_MODE_MSI;
 +              /* Fall through */
 +      case E1000E_INT_MODE_MSI:
 +              if (!pci_enable_msi(adapter->pdev)) {
 +                      adapter->flags |= FLAG_MSI_ENABLED;
 +              } else {
 +                      adapter->int_mode = E1000E_INT_MODE_LEGACY;
 +                      e_err("Failed to initialize MSI interrupts.  Falling "
 +                            "back to legacy interrupts.\n");
 +              }
 +              /* Fall through */
 +      case E1000E_INT_MODE_LEGACY:
 +              /* Don't do anything; this is the system default */
 +              break;
 +      }
 +
 +      return;
 +}
 +
 +/**
 + * e1000_request_msix - Initialize MSI-X interrupts
 + *
 + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 + * kernel.
 + **/
 +static int e1000_request_msix(struct e1000_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      int err = 0, vector = 0;
 +
 +      if (strlen(netdev->name) < (IFNAMSIZ - 5))
 +              sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name);
 +      else
 +              memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
 +      err = request_irq(adapter->msix_entries[vector].vector,
 +                        &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
 +                        netdev);
 +      if (err)
 +              goto out;
 +      adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
 +      adapter->rx_ring->itr_val = adapter->itr;
 +      vector++;
 +
 +      if (strlen(netdev->name) < (IFNAMSIZ - 5))
 +              sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name);
 +      else
 +              memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
 +      err = request_irq(adapter->msix_entries[vector].vector,
 +                        &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
 +                        netdev);
 +      if (err)
 +              goto out;
 +      adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
 +      adapter->tx_ring->itr_val = adapter->itr;
 +      vector++;
 +
 +      err = request_irq(adapter->msix_entries[vector].vector,
 +                        &e1000_msix_other, 0, netdev->name, netdev);
 +      if (err)
 +              goto out;
 +
 +      e1000_configure_msix(adapter);
 +      return 0;
 +out:
 +      return err;
 +}
 +
  /**
   * e1000_request_irq - initialize interrupts
   *
  static int e1000_request_irq(struct e1000_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
 -      int irq_flags = IRQF_SHARED;
        int err;
  
 -      if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) {
 -              err = pci_enable_msi(adapter->pdev);
 -              if (!err) {
 -                      adapter->flags |= FLAG_MSI_ENABLED;
 -                      irq_flags = 0;
 -              }
 +      if (adapter->msix_entries) {
 +              err = e1000_request_msix(adapter);
 +              if (!err)
 +                      return err;
 +              /* fall back to MSI */
 +              e1000e_reset_interrupt_capability(adapter);
 +              adapter->int_mode = E1000E_INT_MODE_MSI;
 +              e1000e_set_interrupt_capability(adapter);
        }
 +      if (adapter->flags & FLAG_MSI_ENABLED) {
 +              err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
 +                                netdev->name, netdev);
 +              if (!err)
 +                      return err;
  
 -      err = request_irq(adapter->pdev->irq,
 -                        ((adapter->flags & FLAG_MSI_ENABLED) ?
 -                              &e1000_intr_msi : &e1000_intr),
 -                        irq_flags, netdev->name, netdev);
 -      if (err) {
 -              if (adapter->flags & FLAG_MSI_ENABLED) {
 -                      pci_disable_msi(adapter->pdev);
 -                      adapter->flags &= ~FLAG_MSI_ENABLED;
 -              }
 -              e_err("Unable to allocate interrupt, Error: %d\n", err);
 +              /* fall back to legacy interrupt */
 +              e1000e_reset_interrupt_capability(adapter);
 +              adapter->int_mode = E1000E_INT_MODE_LEGACY;
        }
  
 +      err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
 +                        netdev->name, netdev);
 +      if (err)
 +              e_err("Unable to allocate interrupt, Error: %d\n", err);
 +
        return err;
  }
  
@@@ -1538,21 -1283,11 +1546,21 @@@ static void e1000_free_irq(struct e1000
  {
        struct net_device *netdev = adapter->netdev;
  
 -      free_irq(adapter->pdev->irq, netdev);
 -      if (adapter->flags & FLAG_MSI_ENABLED) {
 -              pci_disable_msi(adapter->pdev);
 -              adapter->flags &= ~FLAG_MSI_ENABLED;
 +      if (adapter->msix_entries) {
 +              int vector = 0;
 +
 +              free_irq(adapter->msix_entries[vector].vector, netdev);
 +              vector++;
 +
 +              free_irq(adapter->msix_entries[vector].vector, netdev);
 +              vector++;
 +
 +              /* Other Causes interrupt vector */
 +              free_irq(adapter->msix_entries[vector].vector, netdev);
 +              return;
        }
 +
 +      free_irq(adapter->pdev->irq, netdev);
  }
  
  /**
@@@ -1563,8 -1298,6 +1571,8 @@@ static void e1000_irq_disable(struct e1
        struct e1000_hw *hw = &adapter->hw;
  
        ew32(IMC, ~0);
 +      if (adapter->msix_entries)
 +              ew32(EIAC_82574, 0);
        e1e_flush();
        synchronize_irq(adapter->pdev->irq);
  }
@@@ -1576,12 -1309,7 +1584,12 @@@ static void e1000_irq_enable(struct e10
  {
        struct e1000_hw *hw = &adapter->hw;
  
 -      ew32(IMS, IMS_ENABLE_MASK);
 +      if (adapter->msix_entries) {
 +              ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
 +              ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
 +      } else {
 +              ew32(IMS, IMS_ENABLE_MASK);
 +      }
        e1e_flush();
  }
  
@@@ -1831,8 -1559,9 +1839,8 @@@ void e1000e_free_rx_resources(struct e1
   *      traffic pattern.  Constants in this function were computed
   *      based on theoretical maximum wire speed and thresholds were set based
   *      on testing data as well as attempting to minimize response time
 - *      while increasing bulk throughput.
 - *      this functionality is controlled by the InterruptThrottleRate module
 - *      parameter (see e1000_param.c)
 + *      while increasing bulk throughput.  This functionality is controlled
 + *      by the InterruptThrottleRate module parameter.
   **/
  static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
                                     u16 itr_setting, int packets,
@@@ -1940,36 -1669,10 +1948,36 @@@ set_itr_now
                             min(adapter->itr + (new_itr >> 2), new_itr) :
                             new_itr;
                adapter->itr = new_itr;
 -              ew32(ITR, 1000000000 / (new_itr * 256));
 +              adapter->rx_ring->itr_val = new_itr;
 +              if (adapter->msix_entries)
 +                      adapter->rx_ring->set_itr = 1;
 +              else
 +                      ew32(ITR, 1000000000 / (new_itr * 256));
        }
  }
  
 +/**
 + * e1000_alloc_queues - Allocate memory for all rings
 + * @adapter: board private structure to initialize
 + **/
 +static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
 +{
 +      adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 +      if (!adapter->tx_ring)
 +              goto err;
 +
 +      adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 +      if (!adapter->rx_ring)
 +              goto err;
 +
 +      return 0;
 +err:
 +      e_err("Unable to allocate memory for queues\n");
 +      kfree(adapter->rx_ring);
 +      kfree(adapter->tx_ring);
 +      return -ENOMEM;
 +}
 +
  /**
   * e1000_clean - NAPI Rx polling callback
   * @napi: struct associated with this polling callback
  static int e1000_clean(struct napi_struct *napi, int budget)
  {
        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
 +      struct e1000_hw *hw = &adapter->hw;
        struct net_device *poll_dev = adapter->netdev;
        int tx_cleaned = 0, work_done = 0;
  
        /* Must NOT use netdev_priv macro here. */
        adapter = poll_dev->priv;
  
 +      if (adapter->msix_entries &&
 +          !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
 +              goto clean_rx;
 +
        /*
         * e1000_clean is called per-cpu.  This lock protects
         * tx_ring from being cleaned by multiple cpus
                spin_unlock(&adapter->tx_queue_lock);
        }
  
 +clean_rx:
        adapter->clean_rx(adapter, &work_done, budget);
  
        if (tx_cleaned)
                if (adapter->itr_setting & 3)
                        e1000_set_itr(adapter);
                netif_rx_complete(poll_dev, napi);
 -              e1000_irq_enable(adapter);
 +              if (adapter->msix_entries)
 +                      ew32(IMS, adapter->rx_ring->ims_val);
 +              else
 +                      e1000_irq_enable(adapter);
        }
  
        return work_done;
@@@ -2810,8 -2504,6 +2818,8 @@@ int e1000e_up(struct e1000_adapter *ada
        clear_bit(__E1000_DOWN, &adapter->state);
  
        napi_enable(&adapter->napi);
 +      if (adapter->msix_entries)
 +              e1000_configure_msix(adapter);
        e1000_irq_enable(adapter);
  
        /* fire a link change interrupt to start the watchdog */
@@@ -2895,20 -2587,27 +2903,18 @@@ static int __devinit e1000_sw_init(stru
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
  
 -      adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 -      if (!adapter->tx_ring)
 -              goto err;
 +      e1000e_set_interrupt_capability(adapter);
  
 -      adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 -      if (!adapter->rx_ring)
 -              goto err;
 +      if (e1000_alloc_queues(adapter))
 +              return -ENOMEM;
  
        spin_lock_init(&adapter->tx_queue_lock);
  
        /* Explicitly disable IRQ since the NIC can be in any state. */
        e1000_irq_disable(adapter);
  
-       spin_lock_init(&adapter->stats_lock);
        set_bit(__E1000_DOWN, &adapter->state);
        return 0;
 -
 -err:
 -      e_err("Unable to allocate memory for queues\n");
 -      kfree(adapter->rx_ring);
 -      kfree(adapter->tx_ring);
 -      return -ENOMEM;
  }
  
  /**
@@@ -2950,7 -2649,6 +2956,7 @@@ static int e1000_test_msi_interrupt(str
  
        /* free the real vector and request a test handler */
        e1000_free_irq(adapter);
 +      e1000e_reset_interrupt_capability(adapter);
  
        /* Assume that the test fails, if it succeeds then the test
         * MSI irq handler will unset this flag */
        rmb();
  
        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
 +              adapter->int_mode = E1000E_INT_MODE_LEGACY;
                err = -EIO;
                e_info("MSI interrupt test failed!\n");
        }
        /* okay so the test worked, restore settings */
        e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
  msi_test_failed:
 -      /* restore the original vector, even if it failed */
 +      e1000e_set_interrupt_capability(adapter);
        e1000_request_irq(adapter);
        return err;
  }
@@@ -3105,7 -2802,7 +3111,7 @@@ static int e1000_open(struct net_devic
         * ignore e1000e MSI messages, which means we need to test our MSI
         * interrupt now
         */
 -      {
 +      if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
                err = e1000_test_msi(adapter);
                if (err) {
                        e_err("Interrupt allocation failed\n");
@@@ -3221,6 -2918,21 +3227,21 @@@ static int e1000_set_mac(struct net_dev
        return 0;
  }
  
+ /**
+  * e1000e_update_phy_task - work thread to update phy
+  * @work: pointer to our work struct
+  *
+  * this worker thread exists because we must acquire a
+  * semaphore to read the phy, which we could msleep while
+  * waiting for it, and we can't msleep in a timer.
+  **/
+ static void e1000e_update_phy_task(struct work_struct *work)
+ {
+       struct e1000_adapter *adapter = container_of(work,
+                                       struct e1000_adapter, update_phy_task);
+       e1000_get_phy_info(&adapter->hw);
+ }
  /*
   * Need to wait a few seconds after link up to get diagnostic information from
   * the phy
  static void e1000_update_phy_info(unsigned long data)
  {
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-       e1000_get_phy_info(&adapter->hw);
+       schedule_work(&adapter->update_phy_task);
  }
  
  /**
@@@ -3239,10 -2951,6 +3260,6 @@@ void e1000e_update_stats(struct e1000_a
  {
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       unsigned long irq_flags;
-       u16 phy_tmp;
- #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
  
        /*
         * Prevent stats update while adapter is being reset, or if the pci
        if (pci_channel_offline(pdev))
                return;
  
-       spin_lock_irqsave(&adapter->stats_lock, irq_flags);
-       /*
-        * these counters are modified from e1000_adjust_tbi_stats,
-        * called from the interrupt context, so they must only
-        * be written while holding adapter->stats_lock
-        */
        adapter->stats.crcerrs += er32(CRCERRS);
        adapter->stats.gprc += er32(GPRC);
        adapter->stats.gorc += er32(GORCL);
  
        adapter->stats.algnerrc += er32(ALGNERRC);
        adapter->stats.rxerrc += er32(RXERRC);
 -      adapter->stats.tncrs += er32(TNCRS);
 +      if (hw->mac.type != e1000_82574)
 +              adapter->stats.tncrs += er32(TNCRS);
        adapter->stats.cexterr += er32(CEXTERR);
        adapter->stats.tsctc += er32(TSCTC);
        adapter->stats.tsctfc += er32(TSCTFC);
  
        /* Tx Dropped needs to be maintained elsewhere */
  
-       /* Phy Stats */
-       if (hw->phy.media_type == e1000_media_type_copper) {
-               if ((adapter->link_speed == SPEED_1000) &&
-                  (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
-                       phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
-                       adapter->phy_stats.idle_errors += phy_tmp;
-               }
-       }
        /* Management Stats */
        adapter->stats.mgptc += er32(MGTPTC);
        adapter->stats.mgprc += er32(MGTPRC);
        adapter->stats.mgpdc += er32(MGTPDC);
-       spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
  }
  
  /**
@@@ -3358,10 -3046,6 +3356,6 @@@ static void e1000_phy_read_status(struc
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_phy_regs *phy = &adapter->phy_regs;
        int ret_val;
-       unsigned long irq_flags;
-       spin_lock_irqsave(&adapter->stats_lock, irq_flags);
  
        if ((er32(STATUS) & E1000_STATUS_LU) &&
            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
                phy->stat1000 = 0;
                phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
        }
-       spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
  }
  
  static void e1000_print_link_info(struct e1000_adapter *adapter)
@@@ -3510,27 -3192,6 +3502,27 @@@ static void e1000_watchdog_task(struct 
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
                        e1000_print_link_info(adapter);
 +                      /*
 +                       * On supported PHYs, check for duplex mismatch only
 +                       * if link has autonegotiated at 10/100 half
 +                       */
 +                      if ((hw->phy.type == e1000_phy_igp_3 ||
 +                           hw->phy.type == e1000_phy_bm) &&
 +                          (hw->mac.autoneg == true) &&
 +                          (adapter->link_speed == SPEED_10 ||
 +                           adapter->link_speed == SPEED_100) &&
 +                          (adapter->link_duplex == HALF_DUPLEX)) {
 +                              u16 autoneg_exp;
 +
 +                              e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
 +
 +                              if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
 +                                      e_info("Autonegotiated half duplex but"
 +                                             " link partner cannot autoneg. "
 +                                             " Try forcing full duplex if "
 +                                             "link gets many collisions.\n");
 +                      }
 +
                        /*
                         * tweak tx_queue_len according to speed/duplex
                         * and adjust the timeout factor
@@@ -3646,10 -3307,7 +3638,10 @@@ link_up
        }
  
        /* Cause software interrupt to ensure Rx ring is cleaned */
 -      ew32(ICS, E1000_ICS_RXDMT0);
 +      if (adapter->msix_entries)
 +              ew32(ICS, adapter->rx_ring->ims_val);
 +      else
 +              ew32(ICS, E1000_ICS_RXDMT0);
  
        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = 1;
@@@ -4366,7 -4024,6 +4358,7 @@@ static int e1000_suspend(struct pci_de
                e1000e_down(adapter);
                e1000_free_irq(adapter);
        }
 +      e1000e_reset_interrupt_capability(adapter);
  
        retval = pci_save_state(pdev);
        if (retval)
@@@ -4493,7 -4150,6 +4485,7 @@@ static int e1000_resume(struct pci_dev 
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
  
 +      e1000e_set_interrupt_capability(adapter);
        if (netif_running(netdev)) {
                err = e1000_request_irq(adapter);
                if (err)
@@@ -4671,15 -4327,13 +4663,15 @@@ static void e1000_eeprom_checks(struct 
        ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
        if (!(le16_to_cpu(buf) & (1 << 0))) {
                /* Deep Smart Power Down (DSPD) */
 -              e_warn("Warning: detected DSPD enabled in EEPROM\n");
 +              dev_warn(&adapter->pdev->dev,
 +                       "Warning: detected DSPD enabled in EEPROM\n");
        }
  
        ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
        if (le16_to_cpu(buf) & (3 << 2)) {
                /* ASPM enable */
 -              e_warn("Warning: detected ASPM enabled in EEPROM\n");
 +              dev_warn(&adapter->pdev->dev,
 +                       "Warning: detected ASPM enabled in EEPROM\n");
        }
  }
  
@@@ -4822,6 -4476,10 +4814,10 @@@ static int __devinit e1000_probe(struc
        if (err)
                goto err_hw_init;
  
+       if ((adapter->flags & FLAG_IS_ICH) &&
+           (adapter->flags & FLAG_READ_ONLY_NVM))
+               e1000e_write_protect_nvm_ich8lan(&adapter->hw);
        hw->mac.ops.get_bus_info(&adapter->hw);
  
        adapter->hw.phy.autoneg_wait_to_complete = 0;
  
        INIT_WORK(&adapter->reset_task, e1000_reset_task);
        INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+       INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+       INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
  
        /* Initialize link parameters. User can change them with ethtool */
        adapter->hw.mac.autoneg = 1;
@@@ -5042,7 -4702,6 +5040,7 @@@ static void __devexit e1000_remove(stru
        if (!e1000_check_reset_block(&adapter->hw))
                e1000_phy_hw_reset(&adapter->hw);
  
 +      e1000e_reset_interrupt_capability(adapter);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
  
@@@ -5084,8 -4743,6 +5082,8 @@@ static struct pci_device_id e1000_pci_t
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
  
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
 +
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
          board_80003es2lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
  
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
 +
        { }     /* terminate list */
  };
  MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
index f46db6cda487c1097dff1910145b5ec947d84cda,d91dbf7ba4341665c403fe9491f81355098db80d..77a3d7207a5f834db470731502e1197c973c5102
@@@ -114,15 -114,6 +114,15 @@@ E1000_PARAM(InterruptThrottleRate, "Int
  #define DEFAULT_ITR 3
  #define MAX_ITR 100000
  #define MIN_ITR 100
 +/* IntMode (Interrupt Mode)
 + *
 + * Valid Range: 0 - 2
 + *
 + * Default Value: 2 (MSI-X)
 + */
 +E1000_PARAM(IntMode, "Interrupt Mode");
 +#define MAX_INTMODE   2
 +#define MIN_INTMODE   0
  
  /*
   * Enable Smart Power Down of the PHY
@@@ -142,6 -133,15 +142,15 @@@ E1000_PARAM(SmartPowerDownEnable, "Enab
   */
  E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
  
+ /*
+  * Write Protect NVM
+  *
+  * Valid Range: 0, 1
+  *
+  * Default Value: 1 (enabled)
+  */
+ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
  struct e1000_option {
        enum { enable_option, range_option, list_option } type;
        const char *name;
@@@ -361,24 -361,6 +370,24 @@@ void __devinit e1000e_check_options(str
                        adapter->itr = 20000;
                }
        }
 +      { /* Interrupt Mode */
 +              struct e1000_option opt = {
 +                      .type = range_option,
 +                      .name = "Interrupt Mode",
 +                      .err  = "defaulting to 2 (MSI-X)",
 +                      .def  = E1000E_INT_MODE_MSIX,
 +                      .arg  = { .r = { .min = MIN_INTMODE,
 +                                       .max = MAX_INTMODE } }
 +              };
 +
 +              if (num_IntMode > bd) {
 +                      unsigned int int_mode = IntMode[bd];
 +                      e1000_validate_option(&int_mode, &opt, adapter);
 +                      adapter->int_mode = int_mode;
 +              } else {
 +                      adapter->int_mode = opt.def;
 +              }
 +      }
        { /* Smart Power Down */
                const struct e1000_option opt = {
                        .type = enable_option,
                                                                       opt.def);
                }
        }
+       { /* Write-protect NVM */
+               const struct e1000_option opt = {
+                       .type = enable_option,
+                       .name = "Write-protect NVM",
+                       .err  = "defaulting to Enabled",
+                       .def  = OPTION_ENABLED
+               };
+               if (adapter->flags & FLAG_IS_ICH) {
+                       if (num_WriteProtectNVM > bd) {
+                               unsigned int write_protect_nvm = WriteProtectNVM[bd];
+                               e1000_validate_option(&write_protect_nvm, &opt,
+                                                     adapter);
+                               if (write_protect_nvm)
+                                       adapter->flags |= FLAG_READ_ONLY_NVM;
+                       } else {
+                               if (opt.def)
+                                       adapter->flags |= FLAG_READ_ONLY_NVM;
+                       }
+               }
+       }
  }
diff --combined net/core/dev.c
index 7091040e32ac53e255e99aa4d2c62b9a9056ca7f,0ae08d3f57e79730ac893dbb8a046a6678cd622a..1408a083fe4e28c790f5f1184544993fed6642f2
@@@ -891,7 -891,7 +891,7 @@@ int dev_alloc_name(struct net_device *d
   *    Change name of a device, can pass format strings "eth%d".
   *    for wildcarding.
   */
 -int dev_change_name(struct net_device *dev, char *newname)
 +int dev_change_name(struct net_device *dev, const char *newname)
  {
        char oldname[IFNAMSIZ];
        int err = 0;
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
 -              strcpy(newname, dev->name);
        }
        else if (__dev_get_by_name(net, newname))
                return -EEXIST;
@@@ -953,38 -954,6 +953,38 @@@ rollback
        return err;
  }
  
 +/**
 + *    dev_set_alias - change ifalias of a device
 + *    @dev: device
 + *    @alias: name up to IFALIASZ
 + *    @len: limit of bytes to copy from info
 + *
 + *    Set ifalias for a device,
 + */
 +int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 +{
 +      ASSERT_RTNL();
 +
 +      if (len >= IFALIASZ)
 +              return -EINVAL;
 +
 +      if (!len) {
 +              if (dev->ifalias) {
 +                      kfree(dev->ifalias);
 +                      dev->ifalias = NULL;
 +              }
 +              return 0;
 +      }
 +
 +      dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
 +      if (!dev->ifalias)
 +              return -ENOMEM;
 +
 +      strlcpy(dev->ifalias, alias, len+1);
 +      return len;
 +}
 +
 +
  /**
   *    netdev_features_change - device changes features
   *    @dev: device to cause notification
@@@ -1707,14 -1676,14 +1707,14 @@@ static u16 simple_tx_hash(struct net_de
        }
  
        switch (skb->protocol) {
 -      case __constant_htons(ETH_P_IP):
 +      case htons(ETH_P_IP):
                if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
                        ip_proto = ip_hdr(skb)->protocol;
                addr1 = ip_hdr(skb)->saddr;
                addr2 = ip_hdr(skb)->daddr;
                ihl = ip_hdr(skb)->ihl;
                break;
 -      case __constant_htons(ETH_P_IPV6):
 +      case htons(ETH_P_IPV6):
                ip_proto = ipv6_hdr(skb)->nexthdr;
                addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
                addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
@@@ -2949,6 -2918,12 +2949,12 @@@ int netdev_set_master(struct net_devic
        return 0;
  }
  
+ static void dev_change_rx_flags(struct net_device *dev, int flags)
+ {
+       if (dev->flags & IFF_UP && dev->change_rx_flags)
+               dev->change_rx_flags(dev, flags);
+ }
  static int __dev_set_promiscuity(struct net_device *dev, int inc)
  {
        unsigned short old_flags = dev->flags;
                                current->uid, current->gid,
                                audit_get_sessionid(current));
  
-               if (dev->change_rx_flags)
-                       dev->change_rx_flags(dev, IFF_PROMISC);
+               dev_change_rx_flags(dev, IFF_PROMISC);
        }
        return 0;
  }
@@@ -3053,8 -3027,7 +3058,7 @@@ int dev_set_allmulti(struct net_device 
                }
        }
        if (dev->flags ^ old_flags) {
-               if (dev->change_rx_flags)
-                       dev->change_rx_flags(dev, IFF_ALLMULTI);
+               dev_change_rx_flags(dev, IFF_ALLMULTI);
                dev_set_rx_mode(dev);
        }
        return 0;
@@@ -3333,12 -3306,6 +3337,12 @@@ static void dev_addr_discard(struct net
        netif_addr_unlock_bh(dev);
  }
  
 +/**
 + *    dev_get_flags - get flags reported to userspace
 + *    @dev: device
 + *
 + *    Get the combination of flag bits exported through APIs to userspace.
 + */
  unsigned dev_get_flags(const struct net_device *dev)
  {
        unsigned flags;
        return flags;
  }
  
 +/**
 + *    dev_change_flags - change device settings
 + *    @dev: device
 + *    @flags: device state flags
 + *
 + *    Change settings on device based state flags. The flags are
 + *    in the userspace exported format.
 + */
  int dev_change_flags(struct net_device *dev, unsigned flags)
  {
        int ret, changes;
         *      Load in the correct multicast list now the flags have changed.
         */
  
-       if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
-               dev->change_rx_flags(dev, IFF_MULTICAST);
+       if ((old_flags ^ flags) & IFF_MULTICAST)
+               dev_change_rx_flags(dev, IFF_MULTICAST);
  
        dev_set_rx_mode(dev);
  
        return ret;
  }
  
 +/**
 + *    dev_set_mtu - Change maximum transfer unit
 + *    @dev: device
 + *    @new_mtu: new transfer unit
 + *
 + *    Change the maximum transfer size of the network device.
 + */
  int dev_set_mtu(struct net_device *dev, int new_mtu)
  {
        int err;
        return err;
  }
  
 +/**
 + *    dev_set_mac_address - Change Media Access Control Address
 + *    @dev: device
 + *    @sa: new address
 + *
 + *    Change the hardware (MAC) address of the device
 + */
  int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
  {
        int err;
@@@ -3867,14 -3812,11 +3871,11 @@@ static int dev_new_index(struct net *ne
  }
  
  /* Delayed registration/unregisteration */
- static DEFINE_SPINLOCK(net_todo_list_lock);
  static LIST_HEAD(net_todo_list);
  
  static void net_set_todo(struct net_device *dev)
  {
-       spin_lock(&net_todo_list_lock);
        list_add_tail(&dev->todo_list, &net_todo_list);
-       spin_unlock(&net_todo_list_lock);
  }
  
  static void rollback_registered(struct net_device *dev)
@@@ -4201,33 -4143,24 +4202,24 @@@ static void netdev_wait_allrefs(struct 
   *    free_netdev(y1);
   *    free_netdev(y2);
   *
-  * We are invoked by rtnl_unlock() after it drops the semaphore.
+  * We are invoked by rtnl_unlock().
   * This allows us to deal with problems:
   * 1) We can delete sysfs objects which invoke hotplug
   *    without deadlocking with linkwatch via keventd.
   * 2) Since we run with the RTNL semaphore not held, we can sleep
   *    safely in order to wait for the netdev refcnt to drop to zero.
+  *
+  * We must not return until all unregister events added during
+  * the interval the lock was held have been completed.
   */
- static DEFINE_MUTEX(net_todo_run_mutex);
  void netdev_run_todo(void)
  {
        struct list_head list;
  
-       /* Need to guard against multiple cpu's getting out of order. */
-       mutex_lock(&net_todo_run_mutex);
-       /* Not safe to do outside the semaphore.  We must not return
-        * until all unregister events invoked by the local processor
-        * have been completed (either by this todo run, or one on
-        * another cpu).
-        */
-       if (list_empty(&net_todo_list))
-               goto out;
        /* Snapshot list, allow later requests */
-       spin_lock(&net_todo_list_lock);
        list_replace_init(&net_todo_list, &list);
-       spin_unlock(&net_todo_list_lock);
+       __rtnl_unlock();
  
        while (!list_empty(&list)) {
                struct net_device *dev
                /* Free network device */
                kobject_put(&dev->dev.kobj);
        }
- out:
-       mutex_unlock(&net_todo_run_mutex);
  }
  
  static struct net_device_stats *internal_stats(struct net_device *dev)
@@@ -4381,12 -4311,7 +4370,12 @@@ void free_netdev(struct net_device *dev
        put_device(&dev->dev);
  }
  
 -/* Synchronize with packet receive processing. */
 +/**
 + *    synchronize_net -  Synchronize with packet receive processing
 + *
 + *    Wait for packets currently being received to be done.
 + *    Does not block later packets from starting.
 + */
  void synchronize_net(void)
  {
        might_sleep();
@@@ -4688,7 -4613,7 +4677,7 @@@ netdev_dma_event(struct dma_client *cli
  }
  
  /**
 - * netdev_dma_regiser - register the networking subsystem as a DMA client
 + * netdev_dma_register - register the networking subsystem as a DMA client
   */
  static int __init netdev_dma_register(void)
  {
@@@ -4734,12 -4659,6 +4723,12 @@@ int netdev_compute_features(unsigned lo
                one |= NETIF_F_GSO_SOFTWARE;
        one |= NETIF_F_GSO;
  
 +      /*
 +       * If even one device supports a GSO protocol with software fallback,
 +       * enable it for all.
 +       */
 +      all |= one & NETIF_F_GSO_SOFTWARE;
 +
        /* If even one device supports robust GSO, enable it for all. */
        if (one & NETIF_F_GSO_ROBUST)
                all |= NETIF_F_GSO_ROBUST;
@@@ -4789,18 -4708,10 +4778,18 @@@ err_name
        return -ENOMEM;
  }
  
 -char *netdev_drivername(struct net_device *dev, char *buffer, int len)
 +/**
 + *    netdev_drivername - network driver for the device
 + *    @dev: network device
 + *    @buffer: buffer for resulting name
 + *    @len: size of buffer
 + *
 + *    Determine network driver for device.
 + */
 +char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
  {
 -      struct device_driver *driver;
 -      struct device *parent;
 +      const struct device_driver *driver;
 +      const struct device *parent;
  
        if (len <= 0 || !buffer)
                return buffer;
diff --combined net/core/rtnetlink.c
index 8862498fd4a6142673b45f896e5b5933c94832f2,d6381c2a46936751b8da284d8191bf0404593597..3630131fa1fa37e3245301e719db583d0d63db59
@@@ -73,7 -73,7 +73,7 @@@ void __rtnl_unlock(void
  
  void rtnl_unlock(void)
  {
-       mutex_unlock(&rtnl_mutex);
+       /* This fellow will unlock it for us. */
        netdev_run_todo();
  }
  
@@@ -586,7 -586,6 +586,7 @@@ static inline size_t if_nlmsg_size(cons
  {
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
 +             + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
               + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
               + nla_total_size(sizeof(struct rtnl_link_ifmap))
               + nla_total_size(sizeof(struct rtnl_link_stats))
@@@ -641,9 -640,6 +641,9 @@@ static int rtnl_fill_ifinfo(struct sk_b
        if (txq->qdisc_sleeping)
                NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
  
 +      if (dev->ifalias)
 +              NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
 +
        if (1) {
                struct rtnl_link_ifmap map = {
                        .mem_start   = dev->mem_start,
@@@ -717,7 -713,6 +717,7 @@@ const struct nla_policy ifla_policy[IFL
        [IFLA_LINKMODE]         = { .type = NLA_U8 },
        [IFLA_LINKINFO]         = { .type = NLA_NESTED },
        [IFLA_NET_NS_PID]       = { .type = NLA_U32 },
 +      [IFLA_IFALIAS]          = { .type = NLA_STRING, .len = IFALIASZ-1 },
  };
  
  static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@@ -858,14 -853,6 +858,14 @@@ static int do_setlink(struct net_devic
                modified = 1;
        }
  
 +      if (tb[IFLA_IFALIAS]) {
 +              err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
 +                                  nla_len(tb[IFLA_IFALIAS]));
 +              if (err < 0)
 +                      goto errout;
 +              modified = 1;
 +      }
 +
        if (tb[IFLA_BROADCAST]) {
                nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
                send_addr_notify = 1;
diff --combined net/ipv4/tcp_input.c
index 63da39372d4074e18e48cd99a291b242eef3b83e,7abc6b80d47d2fd6fd9ff539e23f50dc89147cec..d77c0d29e2396bb6c2e4ea4cc159019a106be325
@@@ -979,39 -979,6 +979,39 @@@ static void tcp_update_reordering(struc
        }
  }
  
 +/* This must be called before lost_out is incremented */
 +static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 +{
 +      if ((tp->retransmit_skb_hint == NULL) ||
 +          before(TCP_SKB_CB(skb)->seq,
 +                 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 +              tp->retransmit_skb_hint = skb;
 +
 +      if (!tp->lost_out ||
 +          after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
 +              tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
 +}
 +
 +static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
 +{
 +      if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
 +              tcp_verify_retransmit_hint(tp, skb);
 +
 +              tp->lost_out += tcp_skb_pcount(skb);
 +              TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 +      }
 +}
 +
 +void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
 +{
 +      tcp_verify_retransmit_hint(tp, skb);
 +
 +      if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
 +              tp->lost_out += tcp_skb_pcount(skb);
 +              TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 +      }
 +}
 +
  /* This procedure tags the retransmission queue when SACKs arrive.
   *
   * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@@ -1188,7 -1155,13 +1188,7 @@@ static void tcp_mark_lost_retrans(struc
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
  
 -                      /* clear lost hint */
 -                      tp->retransmit_skb_hint = NULL;
 -
 -                      if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
 -                              tp->lost_out += tcp_skb_pcount(skb);
 -                              TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 -                      }
 +                      tcp_skb_mark_lost_uncond_verify(tp, skb);
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
                        if (before(ack_seq, new_low_seq))
@@@ -1298,6 -1271,9 +1298,6 @@@ static int tcp_sacktag_one(struct sk_bu
                                        ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
                                tp->lost_out -= tcp_skb_pcount(skb);
                                tp->retrans_out -= tcp_skb_pcount(skb);
 -
 -                              /* clear lost hint */
 -                              tp->retransmit_skb_hint = NULL;
                        }
                } else {
                        if (!(sacked & TCPCB_RETRANS)) {
                        if (sacked & TCPCB_LOST) {
                                TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                                tp->lost_out -= tcp_skb_pcount(skb);
 -
 -                              /* clear lost hint */
 -                              tp->retransmit_skb_hint = NULL;
                        }
                }
  
        if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
 -              tp->retransmit_skb_hint = NULL;
        }
  
        return flag;
@@@ -1746,8 -1726,6 +1746,8 @@@ int tcp_use_frto(struct sock *sk
                return 0;
  
        skb = tcp_write_queue_head(sk);
 +      if (tcp_skb_is_last(sk, skb))
 +              return 1;
        skb = tcp_write_queue_next(sk, skb);    /* Skips head */
        tcp_for_write_queue_from(skb, sk) {
                if (skb == tcp_send_head(sk))
@@@ -1889,7 -1867,6 +1889,7 @@@ static void tcp_enter_frto_loss(struct 
                if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        tp->lost_out += tcp_skb_pcount(skb);
 +                      tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
                }
        }
        tcp_verify_left_out(tp);
        tp->high_seq = tp->snd_nxt;
        TCP_ECN_queue_cwr(tp);
  
 -      tcp_clear_retrans_hints_partial(tp);
 +      tcp_clear_all_retrans_hints(tp);
  }
  
  static void tcp_clear_retrans_partial(struct tcp_sock *tp)
@@@ -1957,11 -1934,12 +1957,11 @@@ void tcp_enter_loss(struct sock *sk, in
                /* Push undo marker, if it was plain RTO and nothing
                 * was retransmitted. */
                tp->undo_marker = tp->snd_una;
 -              tcp_clear_retrans_hints_partial(tp);
        } else {
                tp->sacked_out = 0;
                tp->fackets_out = 0;
 -              tcp_clear_all_retrans_hints(tp);
        }
 +      tcp_clear_all_retrans_hints(tp);
  
        tcp_for_write_queue(skb, sk) {
                if (skb == tcp_send_head(sk))
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        tp->lost_out += tcp_skb_pcount(skb);
 +                      tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
                }
        }
        tcp_verify_left_out(tp);
@@@ -2180,6 -2157,19 +2180,6 @@@ static int tcp_time_to_recover(struct s
        return 0;
  }
  
 -/* RFC: This is from the original, I doubt that this is necessary at all:
 - * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
 - * retransmitted past LOST markings in the first place? I'm not fully sure
 - * about undo and end of connection cases, which can cause R without L?
 - */
 -static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 -{
 -      if ((tp->retransmit_skb_hint != NULL) &&
 -          before(TCP_SKB_CB(skb)->seq,
 -                 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 -              tp->retransmit_skb_hint = NULL;
 -}
 -
  /* Mark head of queue up as lost. With RFC3517 SACK, the packets is
   * is against sacked "cnt", otherwise it's against facked "cnt"
   */
@@@ -2227,7 -2217,11 +2227,7 @@@ static void tcp_mark_head_lost(struct s
                        cnt = packets;
                }
  
 -              if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
 -                      TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 -                      tp->lost_out += tcp_skb_pcount(skb);
 -                      tcp_verify_retransmit_hint(tp, skb);
 -              }
 +              tcp_skb_mark_lost(tp, skb);
        }
        tcp_verify_left_out(tp);
  }
@@@ -2269,7 -2263,11 +2269,7 @@@ static void tcp_update_scoreboard(struc
                        if (!tcp_skb_timedout(sk, skb))
                                break;
  
 -                      if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
 -                              TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 -                              tp->lost_out += tcp_skb_pcount(skb);
 -                              tcp_verify_retransmit_hint(tp, skb);
 -                      }
 +                      tcp_skb_mark_lost(tp, skb);
                }
  
                tp->scoreboard_skb_hint = skb;
@@@ -2380,6 -2378,10 +2380,6 @@@ static void tcp_undo_cwr(struct sock *s
        }
        tcp_moderate_cwnd(tp);
        tp->snd_cwnd_stamp = tcp_time_stamp;
 -
 -      /* There is something screwy going on with the retrans hints after
 -         an undo */
 -      tcp_clear_all_retrans_hints(tp);
  }
  
  static inline int tcp_may_undo(struct tcp_sock *tp)
@@@ -2836,8 -2838,7 +2836,8 @@@ static u32 tcp_tso_acked(struct sock *s
   * is before the ack sequence we can discard it as it's confirmed to have
   * arrived at the other end.
   */
 -static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 +static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 +                             u32 prior_snd_una)
  {
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        int flag = 0;
        u32 pkts_acked = 0;
        u32 reord = tp->packets_out;
 +      u32 prior_sacked = tp->sacked_out;
        s32 seq_rtt = -1;
        s32 ca_seq_rtt = -1;
        ktime_t last_ackt = net_invalid_timestamp();
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
  
 -              if (unlikely(tp->urg_mode && !before(end_seq, tp->snd_up)))
 -                      tp->urg_mode = 0;
 -
                tp->packets_out -= acked_pcount;
                pkts_acked += acked_pcount;
  
  
                tcp_unlink_write_queue(skb, sk);
                sk_wmem_free_skb(sk, skb);
 -              tcp_clear_all_retrans_hints(tp);
 +              tp->scoreboard_skb_hint = NULL;
 +              if (skb == tp->retransmit_skb_hint)
 +                      tp->retransmit_skb_hint = NULL;
 +              if (skb == tp->lost_skb_hint)
 +                      tp->lost_skb_hint = NULL;
        }
  
 +      if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
 +              tp->snd_up = tp->snd_una;
 +
        if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
                flag |= FLAG_SACK_RENEGING;
  
                        /* Non-retransmitted hole got filled? That's reordering */
                        if (reord < prior_fackets)
                                tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 +
 +                      /* No need to care for underflows here because
 +                       * the lost_skb_hint gets NULLed if we're past it
 +                       * (or something non-trivial happened)
 +                       */
 +                      if (tcp_is_fack(tp))
 +                              tp->lost_cnt_hint -= pkts_acked;
 +                      else
 +                              tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
                }
  
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
@@@ -3312,7 -3299,7 +3312,7 @@@ static int tcp_ack(struct sock *sk, str
                goto no_queue;
  
        /* See if we can take anything off of the retransmit queue. */
 -      flag |= tcp_clean_rtx_queue(sk, prior_fackets);
 +      flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
  
        if (tp->frto_counter)
                frto_cwnd = tcp_process_frto(sk, flag);
@@@ -3455,22 -3442,6 +3455,22 @@@ void tcp_parse_options(struct sk_buff *
        }
  }
  
 +static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
 +{
 +      __be32 *ptr = (__be32 *)(th + 1);
 +
 +      if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
 +                        | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
 +              tp->rx_opt.saw_tstamp = 1;
 +              ++ptr;
 +              tp->rx_opt.rcv_tsval = ntohl(*ptr);
 +              ++ptr;
 +              tp->rx_opt.rcv_tsecr = ntohl(*ptr);
 +              return 1;
 +      }
 +      return 0;
 +}
 +
  /* Fast parse options. This hopes to only see timestamps.
   * If it is wrong it falls back on tcp_parse_options().
   */
@@@ -3482,8 -3453,16 +3482,8 @@@ static int tcp_fast_parse_options(struc
                return 0;
        } else if (tp->rx_opt.tstamp_ok &&
                   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
 -              __be32 *ptr = (__be32 *)(th + 1);
 -              if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
 -                                | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
 -                      tp->rx_opt.saw_tstamp = 1;
 -                      ++ptr;
 -                      tp->rx_opt.rcv_tsval = ntohl(*ptr);
 -                      ++ptr;
 -                      tp->rx_opt.rcv_tsecr = ntohl(*ptr);
 +              if (tcp_parse_aligned_timestamp(tp, th))
                        return 1;
 -              }
        }
        tcp_parse_options(skb, &tp->rx_opt, 1);
        return 1;
@@@ -4159,7 -4138,7 +4159,7 @@@ drop
                                skb1 = skb1->prev;
                        }
                }
 -              __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
 +              __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
  
                /* And clean segments covered by new one as whole. */
                while ((skb1 = skb->next) !=
@@@ -4182,18 -4161,6 +4182,18 @@@ add_sack
        }
  }
  
 +static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 +                                      struct sk_buff_head *list)
 +{
 +      struct sk_buff *next = skb->next;
 +
 +      __skb_unlink(skb, list);
 +      __kfree_skb(skb);
 +      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 +
 +      return next;
 +}
 +
  /* Collapse contiguous sequence of skbs head..tail with
   * sequence numbers start..end.
   * Segments with FIN/SYN are not collapsed (only because this
@@@ -4211,7 -4178,11 +4211,7 @@@ tcp_collapse(struct sock *sk, struct sk
        for (skb = head; skb != tail;) {
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 -                      struct sk_buff *next = skb->next;
 -                      __skb_unlink(skb, list);
 -                      __kfree_skb(skb);
 -                      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 -                      skb = next;
 +                      skb = tcp_collapse_one(sk, skb, list);
                        continue;
                }
  
                memcpy(nskb->head, skb->head, header);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
 -              __skb_insert(nskb, skb->prev, skb, list);
 +              __skb_queue_before(list, skb, nskb);
                skb_set_owner_r(nskb, sk);
  
                /* Copy data, releasing collapsed skbs. */
                                start += size;
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 -                              struct sk_buff *next = skb->next;
 -                              __skb_unlink(skb, list);
 -                              __kfree_skb(skb);
 -                              NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 -                              skb = next;
 +                              skb = tcp_collapse_one(sk, skb, list);
                                if (skb == tail ||
                                    tcp_hdr(skb)->syn ||
                                    tcp_hdr(skb)->fin)
@@@ -4461,8 -4436,8 +4461,8 @@@ static void tcp_new_space(struct sock *
  
        if (tcp_should_expand_sndbuf(sk)) {
                int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
 -                      MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
 -                  demanded = max_t(unsigned int, tp->snd_cwnd,
 +                      MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
 +              int demanded = max_t(unsigned int, tp->snd_cwnd,
                                     tp->reordering + 1);
                sndmem *= 2 * demanded;
                if (sndmem > sk->sk_sndbuf)
  }
  #endif /* CONFIG_NET_DMA */
  
 +/* Does PAWS and seqno based validation of an incoming segment, flags will
 + * play significant role here.
 + */
 +static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 +                            struct tcphdr *th, int syn_inerr)
 +{
 +      struct tcp_sock *tp = tcp_sk(sk);
 +
 +      /* RFC1323: H1. Apply PAWS check first. */
 +      if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 +          tcp_paws_discard(sk, skb)) {
 +              if (!th->rst) {
 +                      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 +                      tcp_send_dupack(sk, skb);
 +                      goto discard;
 +              }
 +              /* Reset is accepted even if it did not pass PAWS. */
 +      }
 +
 +      /* Step 1: check sequence number */
 +      if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
 +              /* RFC793, page 37: "In all states except SYN-SENT, all reset
 +               * (RST) segments are validated by checking their SEQ-fields."
 +               * And page 69: "If an incoming segment is not acceptable,
 +               * an acknowledgment should be sent in reply (unless the RST
 +               * bit is set, if so drop the segment and return)".
 +               */
 +              if (!th->rst)
 +                      tcp_send_dupack(sk, skb);
 +              goto discard;
 +      }
 +
 +      /* Step 2: check RST bit */
 +      if (th->rst) {
 +              tcp_reset(sk);
 +              goto discard;
 +      }
 +
 +      /* ts_recent update must be made after we are sure that the packet
 +       * is in window.
 +       */
 +      tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 +
 +      /* step 3: check security and precedence [ignored] */
 +
 +      /* step 4: Check for a SYN in window. */
 +      if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 +              if (syn_inerr)
 +                      TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 +              NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 +              tcp_reset(sk);
 +              return -1;
 +      }
 +
 +      return 1;
 +
 +discard:
 +      __kfree_skb(skb);
 +      return 0;
 +}
 +
  /*
   *    TCP receive function for the ESTABLISHED state.
   *
@@@ -4804,7 -4718,6 +4804,7 @@@ int tcp_rcv_established(struct sock *sk
                        struct tcphdr *th, unsigned len)
  {
        struct tcp_sock *tp = tcp_sk(sk);
 +      int res;
  
        /*
         *      Header prediction.
  
                /* Check timestamp */
                if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
 -                      __be32 *ptr = (__be32 *)(th + 1);
 -
                        /* No? Slow path! */
 -                      if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
 -                                        | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
 +                      if (!tcp_parse_aligned_timestamp(tp, th))
                                goto slow_path;
  
 -                      tp->rx_opt.saw_tstamp = 1;
 -                      ++ptr;
 -                      tp->rx_opt.rcv_tsval = ntohl(*ptr);
 -                      ++ptr;
 -                      tp->rx_opt.rcv_tsecr = ntohl(*ptr);
 -
                        /* If PAWS failed, check it more carefully in slow path */
                        if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
                                goto slow_path;
                                        goto no_ack;
                        }
  
-                       __tcp_ack_snd_check(sk, 0);
+                       if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
+                               __tcp_ack_snd_check(sk, 0);
  no_ack:
  #ifdef CONFIG_NET_DMA
                        if (copied_early)
@@@ -4976,13 -4899,52 +4977,13 @@@ slow_path
        if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
                goto csum_error;
  
 -      /*
 -       * RFC1323: H1. Apply PAWS check first.
 -       */
 -      if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 -          tcp_paws_discard(sk, skb)) {
 -              if (!th->rst) {
 -                      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 -                      tcp_send_dupack(sk, skb);
 -                      goto discard;
 -              }
 -              /* Resets are accepted even if PAWS failed.
 -
 -                 ts_recent update must be made after we are sure
 -                 that the packet is in window.
 -               */
 -      }
 -
        /*
         *      Standard slow path.
         */
  
 -      if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
 -              /* RFC793, page 37: "In all states except SYN-SENT, all reset
 -               * (RST) segments are validated by checking their SEQ-fields."
 -               * And page 69: "If an incoming segment is not acceptable,
 -               * an acknowledgment should be sent in reply (unless the RST bit
 -               * is set, if so drop the segment and return)".
 -               */
 -              if (!th->rst)
 -                      tcp_send_dupack(sk, skb);
 -              goto discard;
 -      }
 -
 -      if (th->rst) {
 -              tcp_reset(sk);
 -              goto discard;
 -      }
 -
 -      tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 -
 -      if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 -              TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 -              NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 -              tcp_reset(sk);
 -              return 1;
 -      }
 +      res = tcp_validate_incoming(sk, skb, th, 1);
 +      if (res <= 0)
 +              return -res;
  
  step5:
        if (th->ack)
@@@ -5264,7 -5226,6 +5265,7 @@@ int tcp_rcv_state_process(struct sock *
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int queued = 0;
 +      int res;
  
        tp->rx_opt.saw_tstamp = 0;
  
                return 0;
        }
  
 -      if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 -          tcp_paws_discard(sk, skb)) {
 -              if (!th->rst) {
 -                      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 -                      tcp_send_dupack(sk, skb);
 -                      goto discard;
 -              }
 -              /* Reset is accepted even if it did not pass PAWS. */
 -      }
 -
 -      /* step 1: check sequence number */
 -      if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
 -              if (!th->rst)
 -                      tcp_send_dupack(sk, skb);
 -              goto discard;
 -      }
 -
 -      /* step 2: check RST bit */
 -      if (th->rst) {
 -              tcp_reset(sk);
 -              goto discard;
 -      }
 -
 -      tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 -
 -      /* step 3: check security and precedence [ignored] */
 -
 -      /*      step 4:
 -       *
 -       *      Check for a SYN in window.
 -       */
 -      if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 -              NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 -              tcp_reset(sk);
 -              return 1;
 -      }
 +      res = tcp_validate_incoming(sk, skb, th, 0);
 +      if (res <= 0)
 +              return -res;
  
        /* step 5: check the ACK field */
        if (th->ack) {