Diffstat (limited to 'kernel/drivers/net/ethernet/intel/e1000e')
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c    1418
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h      88
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/82571.c          2063
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/82571.h            53
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/Makefile           37
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/defines.h         800
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/e1000.h           595
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/ethtool.c        2322
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/hw.h              698
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c        5802
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h         303
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/mac.c            1800
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/mac.h              68
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/manage.c          347
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/manage.h           65
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/netdev.c         7316
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/nvm.c             633
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/nvm.h              40
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/param.c           533
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/phy.c            3237
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/phy.h             236
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/ptp.c             273
-rw-r--r--  kernel/drivers/net/ethernet/intel/e1000e/regs.h            250
23 files changed, 28977 insertions, 0 deletions
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c
new file mode 100644
index 000000000..08f22f348
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -0,0 +1,1418 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000.h"
+
+/* A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+static const u16 e1000_gg82563_cable_length_table[] = {
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
+};
+
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_gg82563_cable_length_table)
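+
+/* For example (illustrative only): a raw DSP distance reading of 0 selects
+ * the range table[0]..table[0 + 5], i.e. an estimated cable length between
+ * 0 and 60 meters; see e1000_get_cable_length_80003es2lan() below.
+ */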
+
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return 0;
+ } else {
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+ phy->type = e1000_phy_gg82563;
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000e_get_phy_id(hw);
+
+ /* Verify phy id */
+ if (phy->id != GG82563_E_PHY_ID)
+ return -E1000_ERR_PHY;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u16 size;
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+
+ return 0;
+}
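+
+/* Worked example for the sizing above (illustrative, assuming
+ * NVM_WORD_SIZE_BASE_SHIFT is 6 as defined in defines.h): an EECD size
+ * field of 2 gives a shift of 8 and word_size = 1 << 8 = 256 words; the
+ * clamp at 14 caps supported parts at 16K words.
+ */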
+
+/**
+ * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ /* Set media type and media-dependent function pointers */
+ switch (hw->adapter->pdev->device) {
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.check_for_link = e1000e_check_for_serdes_link;
+ mac->ops.setup_physical_interface =
+ e1000e_setup_fiber_serdes_link;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.check_for_link = e1000e_check_for_copper_link;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_80003es2lan;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK);
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return 0;
+}
+
+static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 rc;
+
+ rc = e1000_init_mac_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_phy_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_phy_80003es2lan - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = E1000_SWFW_CSR_SM;
+
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the Kumeran interface
+ **/
+static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = E1000_SWFW_CSR_SM;
+
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the EEPROM.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_acquire_nvm(hw);
+
+ if (ret_val)
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the EEPROM.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ e1000e_release_nvm(hw);
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 i = 0;
+ s32 timeout = 50;
+
+ while (i < timeout) {
+ if (e1000e_get_hw_semaphore(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = er32(SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /* Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000e_put_hw_semaphore(hw);
+ mdelay(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000e_put_hw_semaphore(hw);
+
+ return 0;
+}
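+
+/* Illustrative usage of the SW/FW sync pair above (a sketch, not driver
+ * code): acquire with a port-specific mask, perform the protected access,
+ * then release with the same mask.
+ *
+ *	if (e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_PHY0_SM))
+ *		return -E1000_ERR_SWFW_SYNC;
+ *	... access port 0 PHY registers ...
+ *	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_PHY0_SM);
+ */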
+
+/**
+ * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to release
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ while (e1000e_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
+
+ swfw_sync = er32(SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000e_put_hw_semaphore(hw);
+}
+
+/**
+ * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: pointer to the data returned from the operation
+ *
+ * Read the GG82563 PHY register.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+ /* The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usleep_range(200, 400);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ usleep_range(200, 400);
+
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ usleep_range(200, 400);
+ } else {
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
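+
+/* Worked example of the offset encoding handled above: the low five bits
+ * (MAX_PHY_REG_ADDRESS) select the register within a page, and the bits
+ * above GG82563_PAGE_SHIFT select the page. An offset built as
+ * GG82563_REG(0x34, 9) therefore writes page 0x34 to the page select
+ * register and then accesses register 9 on that page.
+ */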
+
+/**
+ * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to write to
+ * @data: value to write to the register
+ *
+ * Write to the GG82563 PHY register.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+ /* The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usleep_range(200, 400);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ usleep_range(200, 400);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
+
+ usleep_range(200, 400);
+ } else {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the first word to write
+ * @words: number of words to write
+ * @data: buffer of data to write to the NVM
+ *
+ * Write "words" of data to the ESB2 NVM.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return e1000e_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ * @hw: pointer to the HW structure
+ *
+ * Wait a specific amount of time for manageability processes to complete.
+ * This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ if (hw->bus.func == 1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+
+ while (timeout) {
+ if (er32(EEMNGCTL) & mask)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout) {
+ e_dbg("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ * @hw: pointer to the HW structure
+ *
+ * Force the speed and duplex settings onto the PHY. This is a
+ * function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("GG82563 PSCR: %X\n", phy_data);
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ /* Reset the phy to commit changes. */
+ phy_data |= BMCR_RESET;
+
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ if (hw->phy.autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1000e_phy_reset_dsp(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to verify the TX_CLK corresponds
+ * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
+ */
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_80003es2lan - Set approximate cable length
+ * @hw: pointer to the HW structure
+ *
+ * Find the approximate cable length as measured by the GG82563 PHY.
+ * This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+ phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return 0;
+}
+
+/**
+ * e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+ hw->phy.ops.cfg_on_link_up(hw);
+ } else {
+ ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
+ speed,
+ duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Perform a global reset to the ESB2 controller.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 kum_reg_data;
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ ctrl = er32(CTRL);
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Issuing a global reset to MAC\n");
+ ew32(CTRL, ctrl | E1000_CTRL_RST);
+ e1000_release_phy_80003es2lan(hw);
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (ret_val)
+ return ret_val;
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Clear any pending interrupt events. */
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ return e1000_check_alt_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 kum_reg_data;
+ u16 i;
+
+ e1000_initialize_hw_bits_80003es2lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+
+ /* Disabling VLAN filtering */
+ e_dbg("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = er32(TXDCTL(0));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ ew32(TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ reg_data = er32(TXDCTL(1));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ ew32(TXDCTL(1), reg_data);
+
+ /* Enable retransmit on late collisions */
+ reg_data = er32(TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ ew32(TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = er32(TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+ ew32(TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = er32(TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+ /* default to true to enable the MDIC W/A */
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
+
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ if (hw->phy.media_type != e1000_media_type_copper)
+ reg &= ~(1 << 20);
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ ew32(TARC(1), reg);
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ reg = er32(RFCTL);
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ ew32(RFCTL, reg);
+}
+
+/**
+ * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ * @hw: pointer to the HW structure
+ *
+ * Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 reg;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+ data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (phy->mdix) {
+ case 1:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (phy->disable_polarity_correction)
+ data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Bypass Rx and Tx FIFO's */
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
+ if (ret_val)
+ return ret_val;
+
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
+ if (ret_val)
+ return ret_val;
+ data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
+ if (ret_val)
+ return ret_val;
+
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ ew32(CTRL_EXT, reg);
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_ICR_DIS_PADDING;
+ ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface on link-up by applying the last-minute
+ * quirks for either 10/100 or gigabit operation, depending on the
+ * negotiated speed.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 speed;
+ u16 duplex;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+ &duplex);
+ if (ret_val)
+ return ret_val;
+
+ if (speed == SPEED_1000)
+ ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+ else
+ ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val;
+ u32 tipg;
+ u32 i = 0;
+ u16 reg_data, reg_data2;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+ ew32(TIPG, tipg);
+
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
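+
+/* Note on the loop above (and its twin in the gigabit variant below):
+ * Kumeran-side PHY reads can return inconsistent data, so the register is
+ * read repeatedly until two consecutive reads agree, or until
+ * GG82563_MAX_KMRN_RETRY attempts have been made, before it is modified
+ * and written back.
+ */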
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data, reg_data2;
+ u32 tipg;
+ u32 i = 0;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, tipg);
+
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ kmrnctrlsta = er32(KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
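+
+/* Illustrative framing for the access above: reading Kumeran offset 0x1F
+ * (E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE) writes
+ *
+ *	(0x1F << E1000_KMRNCTRLSTA_OFFSET_SHIFT) | E1000_KMRNCTRLSTA_REN
+ *
+ * to KMRNCTRLSTA; after a 2us wait, the result is in the low 16 bits of
+ * the same register.
+ */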
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* If there's an alternate MAC address, place it in RAR0
+ * so that it will override the silicon-installed default
+ * permanent address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during
+ * a driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ er32(ICRXPTC);
+ er32(ICRXATC);
+ er32(ICTXPTC);
+ er32(ICTXATC);
+ er32(ICTXQEC);
+ er32(ICTXQMTC);
+ er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
+ .id_led_init = e1000e_id_led_init_generic,
+ .blink_led = e1000e_blink_led_generic,
+ .check_mng_mode = e1000e_check_mng_mode_generic,
+ /* check_for_link dependent on media type */
+ .cleanup_led = e1000e_cleanup_led_generic,
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
+ .get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
+ .get_link_up_info = e1000_get_link_up_info_80003es2lan,
+ .led_on = e1000e_led_on_generic,
+ .led_off = e1000e_led_off_generic,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .write_vfta = e1000_write_vfta_generic,
+ .clear_vfta = e1000_clear_vfta_generic,
+ .reset_hw = e1000_reset_hw_80003es2lan,
+ .init_hw = e1000_init_hw_80003es2lan,
+ .setup_link = e1000e_setup_link_generic,
+ /* setup_physical_interface dependent on media type */
+ .setup_led = e1000e_setup_led_generic,
+ .config_collision_dist = e1000e_config_collision_dist_generic,
+ .rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
+};
+
+static const struct e1000_phy_operations es2_phy_ops = {
+ .acquire = e1000_acquire_phy_80003es2lan,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan,
+ .get_cable_length = e1000_get_cable_length_80003es2lan,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+ .release = e1000_release_phy_80003es2lan,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = NULL,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+};
+
+static const struct e1000_nvm_operations es2_nvm_ops = {
+ .acquire = e1000_acquire_nvm_80003es2lan,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_80003es2lan,
+ .reload = e1000e_reload_nvm_generic,
+ .update = e1000e_update_nvm_checksum_generic,
+ .valid_led_default = e1000e_valid_led_default,
+ .validate = e1000e_validate_nvm_checksum_generic,
+ .write = e1000_write_nvm_80003es2lan,
+};
+
+const struct e1000_info e1000_es2_info = {
+ .mac = e1000_80003es2lan,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_RX_NEEDS_RESTART /* errata */
+ | FLAG_TARC_SET_BIT_ZERO /* errata */
+ | FLAG_APME_CHECK_PORT_B
+ | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */
+ .flags2 = FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_80003es2lan,
+ .mac_ops = &es2_mac_ops,
+ .phy_ops = &es2_phy_ops,
+ .nvm_ops = &es2_nvm_ops,
+};
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h
new file mode 100644
index 000000000..535a94309
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -0,0 +1,88 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/82571.c b/kernel/drivers/net/ethernet/intel/e1000e/82571.c
new file mode 100644
index 000000000..dc79ed850
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/82571.c
@@ -0,0 +1,2063 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* 82571EB Gigabit Ethernet Controller
+ * 82571EB Gigabit Ethernet Controller (Copper)
+ * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
+ * 82572EI Gigabit Ethernet Controller (Copper)
+ * 82572EI Gigabit Ethernet Controller (Fiber)
+ * 82572EI Gigabit Ethernet Controller
+ * 82573V Gigabit Ethernet Controller (Copper)
+ * 82573E Gigabit Ethernet Controller (Copper)
+ * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+static s32 e1000_led_on_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
+
+/**
+ * e1000_init_phy_params_82571 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return 0;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82571;
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ phy->type = e1000_phy_igp_2;
+ break;
+ case e1000_82573:
+ phy->type = e1000_phy_m88;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ phy->type = e1000_phy_bm;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ e_dbg("Error getting PHY ID\n");
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ if (phy->id != IGP01E1000_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82573:
+ if (phy->id != M88E1111_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ if (phy->id != BME1000_E_PHY_ID_R2)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u16 size;
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (((eecd >> 15) & 0x3) == 0x3) {
+ nvm->type = e1000_nvm_flash_hw;
+ nvm->word_size = 2048;
+ /* Autonomous Flash update bit must be cleared due
+ * to Flash update issue.
+ */
+ eecd &= ~E1000_EECD_AUPDEN;
+ ew32(EECD, eecd);
+ break;
+ }
+ /* Fall Through */
+ default:
+ nvm->type = e1000_nvm_eeprom_spi;
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+ break;
+ }
+
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_init_mac_params_82571 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
+
+ /* Set media type and media-dependent function pointers */
+ switch (hw->adapter->pdev->device) {
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000e_check_for_fiber_link;
+ mac->ops.get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
+ break;
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82572EI_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
+ mac->ops.get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_82571;
+ mac->ops.check_for_link = e1000e_check_for_copper_link;
+ mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* MAC-specific function pointers */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
+ mac->ops.led_on = e1000e_led_on_generic;
+ mac->ops.blink_led = e1000e_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = !!(er32(FWSM) &
+ E1000_FWSM_MODE_MASK);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
+ mac->ops.led_on = e1000_led_on_82574;
+ break;
+ default:
+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
+ mac->ops.led_on = e1000e_led_on_generic;
+ mac->ops.blink_led = e1000e_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ break;
+ }
+
+ /* Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ * first NVM or PHY access. This should be done for single-port
+ * devices, and for one port only on dual-port devices so that
+ * for those devices we can still use the SMBI lock to synchronize
+ * inter-port accesses to the PHY & NVM.
+ */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ swsm2 = er32(SWSM2);
+
+ if (!(swsm2 & E1000_SWSM2_LOCK)) {
+ /* Only do this for the first interface on this card */
+ ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK);
+ force_clear_smbi = true;
+ } else {
+ force_clear_smbi = false;
+ }
+ break;
+ default:
+ force_clear_smbi = true;
+ break;
+ }
+
+ if (force_clear_smbi) {
+ /* Make sure SWSM.SMBI is clear */
+ swsm = er32(SWSM);
+ if (swsm & E1000_SWSM_SMBI) {
+ /* This bit should not be set on a first interface, and
+ * indicates that the bootagent or EFI code has
+ * improperly left this bit enabled
+ */
+ e_dbg("Please update your 82571 Bootagent\n");
+ }
+ ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
+ }
+
+ /* Initialize device specific counter of SMBI acquisition timeouts. */
+ hw->dev_spec.e82571.smb_counter = 0;
+
+ return 0;
+}
+
+static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ static int global_quad_port_a; /* global port a indication */
+ struct pci_dev *pdev = adapter->pdev;
+ int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
+ s32 rc;
+
+ rc = e1000_init_mac_params_82571(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_82571(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_phy_params_82571(hw);
+ if (rc)
+ return rc;
+
+ /* tag quad port adapters first, it's used below */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ adapter->flags |= FLAG_IS_QUAD_PORT;
+ /* mark the first port */
+ if (global_quad_port_a == 0)
+ adapter->flags |= FLAG_IS_QUAD_PORT_A;
+ /* Reset for multiple quad port adapters */
+ global_quad_port_a++;
+ if (global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ default:
+ break;
+ }
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82571:
+ /* these dual ports don't have WoL on port B at all */
+ if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
+ (pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
+ (pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
+ (is_port_b))
+ adapter->flags &= ~FLAG_HAS_WOL;
+ /* quad ports only support WoL on port A */
+ if (adapter->flags & FLAG_IS_QUAD_PORT &&
+ (!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
+ adapter->flags &= ~FLAG_HAS_WOL;
+ /* Does not support WoL on any port */
+ if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
+ adapter->flags &= ~FLAG_HAS_WOL;
+ break;
+ case e1000_82573:
+ if (pdev->device == E1000_DEV_ID_82573L) {
+ adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = DEFAULT_JUMBO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_id = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* The 82571 firmware may still be configuring the PHY.
+ * In this case, we cannot access the PHY until the
+ * configuration is done. So we explicitly set the
+ * PHY ID.
+ */
+ phy->id = IGP01E1000_I_PHY_ID;
+ break;
+ case e1000_82573:
+ return e1000e_get_phy_id(hw);
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usleep_range(20, 40);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 sw_timeout = hw->nvm.word_size + 1;
+ s32 fw_timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+	/* If we have timed out 3 times on trying to acquire
+ * the inter-port SMBI semaphore, there is old code
+ * operating on the other port, and it is not
+ * releasing SMBI. Modify the number of times that
+ * we try for the semaphore to interwork with this
+ * older code.
+ */
+ if (hw->dev_spec.e82571.smb_counter > 2)
+ sw_timeout = 1;
+
+ /* Get the SW semaphore */
+ while (i < sw_timeout) {
+ swsm = er32(SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usleep_range(50, 100);
+ i++;
+ }
+
+ if (i == sw_timeout) {
+ e_dbg("Driver can't access device - SMBI bit is set.\n");
+ hw->dev_spec.e82571.smb_counter++;
+ }
+ /* Get the FW semaphore. */
+ for (i = 0; i < fw_timeout; i++) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (er32(SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usleep_range(50, 100);
+ }
+
+ if (i == fw_timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82571(hw);
+ e_dbg("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = er32(SWSM);
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ ew32(SWSM, swsm);
+}
+
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 i = 0;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ do {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ usleep_range(2000, 4000);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ e_dbg("Driver can't access the PHY\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
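+/* Serializes software access to the 82574/82583 MDIO ownership semaphore
+ * so that concurrent PHY/NVM users cannot race for EXTCNF_CTRL.
+ */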
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ mutex_lock(&swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ mutex_unlock(&swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ e1000_put_hw_semaphore_82573(hw);
+ mutex_unlock(&swflag_mutex);
+}
+
+/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
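+	/* Unlike the older parts, which toggle LPLU through PHY registers,
+	 * the 82574/82583 expose the D0 LPLU control in the MAC POEMB
+	 * register.
+	 */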
+ u32 data = er32(POEMB);
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true; otherwise, lplu for D3 is cleared. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = er32(POEMB);
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ * Then for non-82573 hardware, set the EEPROM access request bit and wait
+ * for the EEPROM access grant bit. If the grant bit is not set, release the
+ * hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1000_get_hw_semaphore_82571(hw);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
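+		/* the flash-based NVM on 82573 needs no EEPROM
+		 * request/grant handshake beyond the HW semaphore
+		 */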
+ break;
+ default:
+ ret_val = e1000e_acquire_nvm(hw);
+ break;
+ }
+
+ if (ret_val)
+ e1000_put_hw_semaphore_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+ e1000e_release_nvm(hw);
+ e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 ret_val;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
+ break;
+ default:
+ ret_val = -E1000_ERR_NVM;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ u32 eecd;
+ s32 ret_val;
+ u16 i;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+	/* If our NVM is an EEPROM, then we're done;
+	 * otherwise, commit the checksum to the flash NVM.
+ */
+ if (hw->nvm.type != e1000_nvm_flash_hw)
+ return 0;
+
+ /* Check for pending operations. */
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ usleep_range(1000, 2000);
+ if (!(er32(EECD) & E1000_EECD_FLUPD))
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ /* Reset the firmware if using STM opcode. */
+ if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+ /* The enabling of and the actual reset must be done
+ * in two write cycles.
+ */
+ ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
+ e1e_flush();
+ ew32(HICR, E1000_HICR_FW_RESET);
+ }
+
+ /* Commit the write to flash */
+ eecd = er32(EECD) | E1000_EECD_FLUPD;
+ ew32(EECD, eecd);
+
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ usleep_range(1000, 2000);
+ if (!(er32(EECD) & E1000_EECD_FLUPD))
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
+
+/**
+ * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ if (hw->nvm.type == e1000_nvm_flash_hw)
+ e1000_fix_nvm_checksum_82571(hw);
+
+ return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * After checking for invalid values, poll the EEPROM to ensure the previous
+ * command has completed before trying to write the next word. After the
+ * write, poll for completion.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eewr = 0;
+ s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
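+	/* each EEWR write encodes the data, the word address and the START
+	 * bit in a single register value
+	 */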
+ for (i = 0; i < words; i++) {
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START);
+
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+
+ ew32(EEWR, eewr);
+
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cfg_done_82571 - Poll for configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Polls the management control register until the config done bit is set.
+ **/
+static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+
+ while (timeout) {
+ if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout) {
+ e_dbg("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When activating LPLU
+ * this function also disables smart speed and vice versa. LPLU will not be
+ * activated unless the device autonegotiation advertisement meets standards
+ * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
+ * pointer entry point only called by PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_reset_hw_82571 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext, eecd, tctl;
+ s32 ret_val;
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ ew32(RCTL, 0);
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ /* Must acquire the MDIO ownership before MAC reset.
+ * Ownership defaults to firmware after a reset.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_get_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ ctrl = er32(CTRL);
+
+ e_dbg("Issuing a global reset to MAC\n");
+ ew32(CTRL, ctrl | E1000_CTRL_RST);
+
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->nvm.type == e1000_nvm_flash_hw) {
+ usleep_range(10, 20);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+ }
+
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+ * Need to wait for Phy configuration completion before accessing
+ * NVM and Phy.
+ */
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* REQ and GNT bits need to be cleared when using AUTO_RD
+ * to access the EEPROM.
+ */
+ eecd = er32(EECD);
+ eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
+ ew32(EECD, eecd);
+ break;
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ msleep(25);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear any pending interrupt events. */
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_set_laa_state_82571(hw, true);
+ }
+
+ /* Reinitialize the 82571 serdes link state machine */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ hw->mac.serdes_link_state = e1000_serdes_link_down;
+
+ return 0;
+}
+
+/**
+ * e1000_init_hw_82571 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware, readying it for operation.
+ **/
+static s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ e1000_initialize_hw_bits_82571(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+
+ /* Disabling VLAN filtering */
+ e_dbg("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address.
+ * If, however, a locally administered address was assigned to the
+ * 82571, we must reserve a RAR for it to work around an issue where
+ * resetting one port will reload the MAC on the other port.
+ */
+ if (e1000e_get_laa_state_82571(hw))
+ rar_count--;
+ e1000e_init_rx_addrs(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = er32(TXDCTL(0));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ ew32(TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ switch (mac->type) {
+ case e1000_82573:
+ e1000e_enable_tx_pkt_filtering(hw);
+ /* fall through */
+ case e1000_82574:
+ case e1000_82583:
+ reg_data = er32(GCR);
+ reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ ew32(GCR, reg_data);
+ break;
+ default:
+ reg_data = er32(TXDCTL(1));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC);
+ ew32(TXDCTL(1), reg_data);
+ break;
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+ u32 reg;
+
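+	/* The raw (1 << n) writes below set hardware-dependent bits that
+	 * have no symbolic defines in this driver.
+	 */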
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ reg |= (1 << 26);
+ break;
+ default:
+ break;
+ }
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg &= ~((1 << 29) | (1 << 30));
+ reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ ew32(TARC(1), reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = er32(CTRL);
+ reg &= ~(1 << 29);
+ ew32(CTRL, reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Extended Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = er32(CTRL_EXT);
+ reg &= ~(1 << 23);
+ reg |= (1 << 22);
+ ew32(CTRL_EXT, reg);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.type == e1000_82571) {
+ reg = er32(PBA_ECC);
+ reg |= E1000_PBA_ECC_CORR_EN;
+ ew32(PBA_ECC, reg);
+ }
+
+ /* Workaround for hardware errata.
+ * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
+ */
+ if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, reg);
+ }
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ if (hw->mac.type <= e1000_82573) {
+ reg = er32(RFCTL);
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ ew32(RFCTL, reg);
+ }
+
+ /* PCI-Ex Control Registers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ reg = er32(GCR);
+ reg |= (1 << 22);
+ ew32(GCR, reg);
+
+		/* Workaround for a hardware erratum documented in the errata
+		 * docs: some error-prone or unreliable PCIe completions can
+		 * occur, particularly with ASPM enabled. Without this fix,
+		 * the issue can cause Tx timeouts.
+ */
+ reg = er32(GCR2);
+ reg |= 1;
+ ew32(GCR2, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->mng_cookie.vlan_id != 0) {
+			/* The VFTA is a 4096-bit field, each bit identifying
+			 * a single VLAN ID. The following operations
+			 * determine which 32-bit entry (i.e. offset) into the
+ * array we want to set the VLAN ID (i.e. bit) of
+ * the manageability unit.
+ */
+ vfta_offset = (hw->mng_cookie.vlan_id >>
+ E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg =
+ 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ }
+ break;
+ default:
+ break;
+ }
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+ e1e_flush();
+ }
+}
+
+/**
+ * e1000_check_mng_mode_82574 - Check manageability is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Reads the NVM Initialization Control Word 2 and returns true
+ * (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+	u16 data;
+	s32 ret_val;
+
+	/* treat a failed NVM read as manageability disabled */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+	if (ret_val)
+		return false;
+
+	return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ * e1000_led_on_82574 - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+
+ ctrl = hw->mac.ledctl_mode2;
+ if (!(E1000_STATUS_LU & er32(STATUS))) {
+ /* If no link, then turn LED on by setting the invert bit
+ * for each LED that's "on" (0x0E) in ledctl_mode2.
+ */
+ for (i = 0; i < 4; i++)
+ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+ }
+ ew32(LEDCTL, ctrl);
+
+ return 0;
+}
+
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ s32 ret_val;
+
+	/* Read the PHY Receive Error counter first; if it is at max (all F's),
+	 * read the Base1000T status register. If both are at max, the PHY is
+	 * hung.
+ */
+ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+ if (ret_val)
+ return false;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+ if (ret_val)
+ return false;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * e1000_setup_link_82571 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+ /* 82573 does not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+ break;
+ default:
+ break;
+ }
+
+ return e1000e_setup_link_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_82571 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link; once link is established, collision distance and flow control
+ * are configured.
+ **/
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ case e1000_phy_bm:
+ ret_val = e1000e_copper_link_setup_m88(hw);
+ break;
+ case e1000_phy_igp_2:
+ ret_val = e1000e_copper_link_setup_igp(hw);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ if (ret_val)
+ return ret_val;
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes links.
+ * Upon successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* If SerDes loopback mode is entered, there is no form
+ * of reset to take the adapter out of that mode. So we
+ * have to explicitly take the adapter out of loopback
+ * mode. This prevents drivers from twiddling their thumbs
+ * if another tool failed to take it out of loopback mode.
+ */
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ break;
+ default:
+ break;
+ }
+
+ return e1000e_setup_fiber_serdes_link(hw);
+}
+
+/**
+ * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Reports the link state as up or down.
+ *
+ * If autonegotiation is supported by the link partner, the link state is
+ * determined by the result of autonegotiation. This is the most likely case.
+ * If autonegotiation is not supported by the link partner, and the link
+ * has a valid signal, force the link up.
+ *
+ * The link state is represented internally here by 4 states:
+ *
+ * 1) down
+ * 2) autoneg_progress
+ * 3) autoneg_complete (the link successfully autonegotiated)
+ * 4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
+ **/
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ u32 txcw;
+ u32 i;
+ s32 ret_val = 0;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ er32(RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ usleep_range(10, 20);
+ rxcw = er32(RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+ /* Receiver is synchronized with no invalid bits. */
+ switch (mac->serdes_link_state) {
+ case e1000_serdes_link_autoneg_complete:
+ if (!(status & E1000_STATUS_LU)) {
+ /* We have lost link, retry autoneg before
+ * reporting link failure
+ */
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_forced_up:
+ /* If we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+ */
+ if (rxcw & E1000_RXCW_C) {
+ /* Enable autoneg, and unforce link up */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_autoneg_progress:
+ if (rxcw & E1000_RXCW_C) {
+ /* We received /C/ ordered sets, meaning the
+ * link partner has autonegotiated, and we can
+ * trust the Link Up (LU) status bit.
+ */
+ if (status & E1000_STATUS_LU) {
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_complete;
+ e_dbg("AN_PROG -> AN_UP\n");
+ mac->serdes_has_link = true;
+ } else {
+ /* Autoneg completed, but failed. */
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ e_dbg("AN_PROG -> DOWN\n");
+ }
+ } else {
+ /* The link partner did not autoneg.
+ * Force link up and full duplex, and change
+ * state to forced.
+ */
+ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after link up. */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error config flow control\n");
+ break;
+ }
+ mac->serdes_link_state =
+ e1000_serdes_link_forced_up;
+ mac->serdes_has_link = true;
+ e_dbg("AN_PROG -> FORCED_UP\n");
+ }
+ break;
+
+ case e1000_serdes_link_down:
+ default:
+ /* The link was down but the receiver has now gained
+			 * valid sync, so let's see if we can bring the link
+ * up.
+ */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("DOWN -> AN_PROG\n");
+ break;
+ }
+ } else {
+ if (!(rxcw & E1000_RXCW_SYNCH)) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state = e1000_serdes_link_down;
+ e_dbg("ANYSTATE -> DOWN\n");
+ } else {
+			/* Check several times; if the SYNCH bit and CONFIG
+			 * bit are both consistently 1, simply ignore the
+			 * IV bit and restart autoneg.
+ */
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ usleep_range(10, 20);
+ rxcw = er32(RXCW);
+ if ((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))
+ continue;
+
+ if (rxcw & E1000_RXCW_IV) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ e_dbg("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = er32(TXCW);
+ txcw |= E1000_TXCW_ANE;
+ ew32(TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("ANYSTATE -> AN_PROG\n");
+ }
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82571 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (*data == ID_LED_RESERVED_F746)
+ *data = ID_LED_DEFAULT_82573;
+ break;
+ default:
+ if (*data == ID_LED_RESERVED_0000 ||
+ *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+ if (hw->mac.type != e1000_82571)
+ return false;
+
+ return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ * e1000e_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+ if (hw->mac.type != e1000_82571)
+ return;
+
+ hw->dev_spec.e82571.laa_is_present = state;
+
+ /* If workaround is activated... */
+ if (state)
+		/* Hold a copy of the LAA in RAR[14]. This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed, the actual LAA is in one of the RARs and no
+ * incoming packets directed to this port are dropped.
+ * Eventually the LAA will be in RAR[0] and RAR[14].
+ */
+ hw->mac.ops.rar_set(hw, hw->mac.addr,
+ hw->mac.rar_entry_count - 1);
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update. After updating the
+ * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 data;
+
+ if (nvm->type != e1000_nvm_flash_hw)
+ return 0;
+
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+ * 10h-12h. Checksum may need to be fixed.
+ */
+ ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x10)) {
+ /* Read 0x23 and check bit 15. This bit is a 1
+ * when the checksum has already been fixed. If
+ * the checksum is still wrong and this bit is a
+ * 1, we need to return bad checksum. Otherwise,
+ * we need to set this bit to a 1 and update the
+ * checksum.
+ */
+ ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x8000)) {
+ data |= 0x8000;
+ ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_update_nvm_checksum(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ if (hw->mac.type == e1000_82571) {
+ s32 ret_val;
+
+ /* If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during
+ * a driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!phy->ops.check_reset_block)
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ er32(ICRXPTC);
+ er32(ICRXATC);
+ er32(ICTXPTC);
+ er32(ICTXATC);
+ er32(ICTXQEC);
+ er32(ICTXQMTC);
+ er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations e82571_mac_ops = {
+ /* .check_mng_mode: mac type dependent */
+ /* .check_for_link: media type dependent */
+ .id_led_init = e1000e_id_led_init_generic,
+ .cleanup_led = e1000e_cleanup_led_generic,
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
+ .get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
+ /* .get_link_up_info: media type dependent */
+ /* .led_on: mac type dependent */
+ .led_off = e1000e_led_off_generic,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .write_vfta = e1000_write_vfta_generic,
+ .clear_vfta = e1000_clear_vfta_82571,
+ .reset_hw = e1000_reset_hw_82571,
+ .init_hw = e1000_init_hw_82571,
+ .setup_link = e1000_setup_link_82571,
+ /* .setup_physical_interface: media type dependent */
+ .setup_led = e1000e_setup_led_generic,
+ .config_collision_dist = e1000e_config_collision_dist_generic,
+ .read_mac_addr = e1000_read_mac_addr_82571,
+ .rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_igp = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_igp,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = NULL,
+ .force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
+ .get_cfg_done = e1000_get_cfg_done_82571,
+ .get_cable_length = e1000e_get_cable_length_igp_2,
+ .get_info = e1000e_get_phy_info_igp,
+ .read_reg = e1000e_read_phy_reg_igp,
+ .release = e1000_put_hw_semaphore_82571,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000e_write_phy_reg_igp,
+ .cfg_on_link_up = NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_m88 = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
+ .get_cable_length = e1000e_get_cable_length_m88,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000e_read_phy_reg_m88,
+ .release = e1000_put_hw_semaphore_82571,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000e_write_phy_reg_m88,
+ .cfg_on_link_up = NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_bm = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
+ .get_cable_length = e1000e_get_cable_length_m88,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000e_read_phy_reg_bm2,
+ .release = e1000_put_hw_semaphore_82571,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000e_write_phy_reg_bm2,
+ .cfg_on_link_up = NULL,
+};
+
+static const struct e1000_nvm_operations e82571_nvm_ops = {
+ .acquire = e1000_acquire_nvm_82571,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_82571,
+ .reload = e1000e_reload_nvm_generic,
+ .update = e1000_update_nvm_checksum_82571,
+ .valid_led_default = e1000_valid_led_default_82571,
+ .validate = e1000_validate_nvm_checksum_82571,
+ .write = e1000_write_nvm_82571,
+};
+
+const struct e1000_info e1000_82571_info = {
+ .mac = e1000_82571,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_RESET_OVERWRITES_LAA /* errata */
+ | FLAG_TARC_SPEED_MODE_BIT /* errata */
+ | FLAG_APME_CHECK_PORT_B,
+ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+ | FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_igp,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82572_info = {
+ .mac = e1000_82572,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_TARC_SPEED_MODE_BIT, /* errata */
+ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+ | FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_igp,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82573_info = {
+ .mac = e1000_82573,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_SWSM_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L1
+ | FLAG2_DISABLE_ASPM_L0S,
+ .pba = 20,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_m88,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82574_info = {
+ .mac = e1000_82574,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_MSIX
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_CHECK_PHY_HANG
+ | FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1
+ | FLAG2_NO_DISABLE_RX
+ | FLAG2_DMA_BURST,
+ .pba = 32,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_bm,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82583_info = {
+ .mac = e1000_82583,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1
+ | FLAG2_NO_DISABLE_RX,
+ .pba = 32,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_bm,
+ .nvm_ops = &e82571_nvm_ops,
+};
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/82571.h b/kernel/drivers/net/ethernet/intel/e1000e/82571.h
new file mode 100644
index 000000000..2e758f796
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/82571.h
@@ -0,0 +1,53 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_82571_H_
+#define _E1000E_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
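+/* PHY registers polled by e1000_check_phy_82574() */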
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/Makefile b/kernel/drivers/net/ethernet/intel/e1000e/Makefile
new file mode 100644
index 000000000..106de4933
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/Makefile
@@ -0,0 +1,37 @@
+################################################################################
+#
+# Intel PRO/1000 Linux driver
+# Copyright(c) 1999 - 2014 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/1000 ethernet driver
+#
+
+obj-$(CONFIG_E1000E) += e1000e.o
+
+e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
+ mac.o manage.o nvm.o phy.o \
+ param.o ethtool.o netdev.o ptp.o
+
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/defines.h b/kernel/drivers/net/ethernet/intel/e1000e/defines.h
new file mode 100644
index 000000000..0570c668e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/defines.h
@@ -0,0 +1,800 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK 0x00001000
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
+
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HEX 0x00010000
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x1
+#define E1000_SWFW_PHY0_SM 0x2
+#define E1000_SWFW_PHY1_SM 0x4
+#define E1000_SWFW_CSR_SM 0x8
+
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_MASK 0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+#define E1000_TX_PTR_GAP 0x1F
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+/* Low Power IDLE Control */
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
+
+#define E1000_PBA_RXA_MASK 0xFFFF
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
+/* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+
+/* PBA ECC Register */
+#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
+#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
+#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */
+#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
+#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
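+
+/* Illustrative sketch (not part of the original file): these sources are
+ * enabled by writing the mask to the Interrupt Mask Set register, e.g. with
+ * the ew32() accessor defined in e1000.h:
+ *
+ *	ew32(IMS, IMS_ENABLE_MASK);
+ */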
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
+#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of desc. still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
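+
+/* Illustrative sketch (not part of the original file) of how one RAR entry
+ * is composed: RAL holds the low four MAC-address bytes and RAH the
+ * remaining two plus the address-valid flag (addr[] is a hypothetical
+ * 6-byte MAC address):
+ *
+ *	u32 ral = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+ *	u32 rah = addr[4] | (addr[5] << 8) | E1000_RAH_AV;
+ */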
+
+/* Error Codes */
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT 50
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT 10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* NVM Control */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type (0-small, 1-large) */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
+#define E1000_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
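+
+/* Illustrative sketch (not part of the original file) of the checksum rule
+ * above, using the e1000_read_nvm() accessor declared in e1000.h:
+ *
+ *	u16 i, word, sum = 0;
+ *
+ *	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ *		if (e1000_read_nvm(hw, i, 1, &word))
+ *			return -E1000_ERR_NVM;
+ *		sum += word;
+ *	}
+ *	if (sum != (u16)NVM_SUM)
+ *		return -E1000_ERR_NVM;
+ */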
+
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
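+
+/* Worked example: with the nibble values above, ID_LED_DEFAULT evaluates to
+ * (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911.
+ */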
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs.
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define BME1000_E_PHY_ID 0x01410CB0
+#define BME1000_E_PHY_ID_R2 0x01410CB1
+#define I82577_E_PHY_ID 0x01540050
+#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
+#define I217_E_PHY_ID 0x015400A0
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 15:14 */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10b = 1000 Mb/s */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
+
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
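+
+/* Example: GG82563_REG(193, 16) encodes page 193, register 16 as
+ * (193 << 5) | 16 = 0x1830, which is how GG82563_PHY_KMRN_MODE_CTRL below
+ * is formed.
+ */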
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL \
+ GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_PAGE_SELECT \
+ GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 \
+ GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT \
+ GG82563_REG(0, 29) /* Alternate Page Select */
+
+#define GG82563_PHY_MAC_SPEC_CTRL \
+ GG82563_REG(2, 21) /* MAC Specific Control Register */
+
+#define GG82563_PHY_DSP_DISTANCE \
+ GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL \
+ GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PWR_MGMT_CTRL \
+ GG82563_REG(193, 20) /* Power Management Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL \
+ GG82563_REG(194, 18) /* Inband Control */
+
+/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_ERROR 0x40000000
+
+/* SerDes Control */
+#define E1000_GEN_POLL_TIMEOUT 640
+
+#endif /* _E1000_DEFINES_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/e1000.h b/kernel/drivers/net/ethernet/intel/e1000e/e1000.h
new file mode 100644
index 000000000..0abc942c9
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -0,0 +1,595 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/mii.h>
+#include <linux/mdio.h>
+#include <linux/pm_qos.h>
+#include "hw.h"
+
+struct e1000_info;
+
+#define e_dbg(format, arg...) \
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY 0
+#define E1000E_INT_MODE_MSI 1
+#define E1000E_INT_MODE_MSIX 2
+
+/* Tx/Rx descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 4096
+#define E1000_MIN_TXD 64
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 4096
+#define E1000_MIN_RXD 64
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_APME 0x0400
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+#define DEFAULT_JUMBO 9234
+
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
+/* Count for polling the __E1000_RESETTING condition every 10-20msec.
+ * Experimentation has shown the reset can take approximately 210msec.
+ */
+#define E1000_CHECK_RESET_COUNT 25
+
+#define DEFAULT_RDTR 0
+#define DEFAULT_RADV 8
+#define BURST_RDTR 0x20
+#define BURST_RADV 0x20
+
+/* in the case of WTHRESH, it appears at least the 82571/2 hardware
+ * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
+ * WTHRESH=4, so a setting of 5 gives the most efficient bus
+ * utilization but to avoid possible Tx stalls, set it to 1
+ */
+#define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+ (1 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+#define E1000_RXDCTL_DMA_BURST_ENABLE \
+ (0x01000000 | /* set descriptor granularity */ \
+ (4 << 16) | /* set writeback threshold */ \
+ (4 << 8) | /* set prefetch threshold */ \
+ 0x20) /* set hthresh */
+
+#define E1000_TIDV_FPD (1 << 31)
+#define E1000_RDTR_FPD (1 << 31)
+
+enum e1000_boards {
+ board_82571,
+ board_82572,
+ board_82573,
+ board_82574,
+ board_82583,
+ board_80003es2lan,
+ board_ich8lan,
+ board_ich9lan,
+ board_ich10lan,
+ board_pchlan,
+ board_pch2lan,
+ board_pch_lpt,
+ board_pch_spt
+};
+
+struct e1000_ps_page {
+ struct page *page;
+ u64 dma; /* must be u64 - written to hw */
+};
+
+/* wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct e1000_buffer {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+ union {
+ /* Tx */
+ struct {
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
+ u16 mapped_as_page;
+ };
+ /* Rx */
+ struct {
+ /* arrays of page information for packet split */
+ struct e1000_ps_page *ps_pages;
+ struct page *page;
+ };
+ };
+};
+
+struct e1000_ring {
+ struct e1000_adapter *adapter; /* back pointer to adapter */
+ void *desc; /* pointer to ring memory */
+ dma_addr_t dma; /* phys address of ring */
+ unsigned int size; /* length of ring in bytes */
+ unsigned int count; /* number of desc. in ring */
+
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ void __iomem *head;
+ void __iomem *tail;
+
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ char name[IFNAMSIZ + 5];
+ u32 ims_val;
+ u32 itr_val;
+ void __iomem *itr_register;
+ int set_itr;
+
+ struct sk_buff *rx_skb_top;
+};
+
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+ u16 bmcr; /* basic mode control register */
+ u16 bmsr; /* basic mode status register */
+ u16 advertise; /* auto-negotiation advertisement */
+ u16 lpa; /* link partner ability register */
+ u16 expansion; /* auto-negotiation expansion reg */
+ u16 ctrl1000; /* 1000BASE-T control register */
+ u16 stat1000; /* 1000BASE-T status register */
+ u16 estatus; /* extended status register */
+};
+
+/* board specific private data structure */
+struct e1000_adapter {
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+ struct timer_list blink_timer;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+
+ const struct e1000_info *ei;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u16 mng_vlan_id;
+ u16 link_speed;
+ u16 link_duplex;
+ u16 eeprom_vers;
+
+ /* track device up/down/testing state */
+ unsigned long state;
+
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ /* Tx - one ring per active queue */
+ struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
+ u32 tx_fifo_limit;
+
+ struct napi_struct napi;
+
+ unsigned int uncorr_errors; /* uncorrectable ECC errors */
+ unsigned int corr_errors; /* correctable ECC errors */
+ unsigned int restart_queue;
+ u32 txd_cmd;
+
+ bool detect_tx_hung;
+ bool tx_hang_recheck;
+ u8 tx_timeout_factor;
+
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+
+ /* Tx stats */
+ u64 tpt_old;
+ u64 colc_old;
+ u32 gotc;
+ u64 gotc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u32 tx_dma_failed;
+ u32 tx_hwtstamp_timeouts;
+
+ /* Rx */
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
+ struct e1000_ring *rx_ring;
+
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+
+ /* Rx stats */
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u64 rx_hdr_split;
+ u32 gorc;
+ u64 gorc_old;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+ u32 rx_hwtstamp_cleared;
+
+ unsigned int rx_ps_pages;
+ u16 rx_ps_bsize0;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+
+ spinlock_t stats64_lock; /* protects statistics counters */
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ /* Snapshot of PHY registers */
+ struct e1000_phy_regs phy_regs;
+
+ struct e1000_ring test_tx_ring;
+ struct e1000_ring test_rx_ring;
+ u32 test_icr;
+
+ u32 msg_enable;
+ unsigned int num_vectors;
+ struct msix_entry *msix_entries;
+ int int_mode;
+ u32 eiac_mask;
+
+ u32 eeprom_wol;
+ u32 wol;
+ u32 pba;
+ u32 max_hw_frame_size;
+
+ bool fc_autoneg;
+
+ unsigned int flags;
+ unsigned int flags2;
+ struct work_struct downshift_task;
+ struct work_struct update_phy_task;
+ struct work_struct print_hang_task;
+
+ int phy_hang_count;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct delayed_work systim_overflow_work;
+ struct sk_buff *tx_hwtstamp_skb;
+ unsigned long tx_hwtstamp_start;
+ struct work_struct tx_hwtstamp_work;
+ spinlock_t systim_lock; /* protects SYSTIML/H registers */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct pm_qos_request pm_qos_req;
+
+ u16 eee_advert;
+};
+
+struct e1000_info {
+ enum e1000_mac_type mac;
+ unsigned int flags;
+ unsigned int flags2;
+ u32 pba;
+ u32 max_hw_frame_size;
+ s32 (*get_variants)(struct e1000_adapter *);
+ const struct e1000_mac_operations *mac_ops;
+ const struct e1000_phy_operations *phy_ops;
+ const struct e1000_nvm_operations *nvm_ops;
+};
+
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
+
+/* The system time is maintained by a 64-bit counter comprised of the 32-bit
+ * SYSTIMH and SYSTIML registers. How the counter increments (and therefore
+ * its resolution) is based on the contents of the TIMINCA register - it
+ * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0).
+ * For the best accuracy, the incperiod should be as small as possible. The
+ * incvalue is scaled by a factor as large as possible (while still fitting
+ * in bits 23:0) so that relatively small clock corrections can be made.
+ *
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
+ * bits to count nanoseconds, leaving the rest for fractional nanoseconds.
+ */
+#define INCVALUE_96MHz 125
+#define INCVALUE_SHIFT_96MHz 17
+#define INCPERIOD_SHIFT_96MHz 2
+#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+
+#define INCVALUE_25MHz 40
+#define INCVALUE_SHIFT_25MHz 18
+#define INCPERIOD_25MHz 1
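+
+/* Worked example (illustration only): with a stable 96 MHz clock, TIMINCA
+ * would hold incperiod = INCPERIOD_96MHz = (12 >> 2) = 3 in bits 31:24 and
+ * incvalue = INCVALUE_96MHz << INCVALUE_SHIFT_96MHz = 125 << 17 in bits
+ * 23:0, so SYSTIM advances by 125 << 17 every three clock ticks (31.25 ns).
+ */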
+
+/* Another drawback of scaling the incvalue by a large factor is the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
+ * by simply reading the clock before it overflows.
+ *
+ * Clock ns bits Overflows after
+ * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~
+ * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs
+ * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
+ */
+#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
+
+/* hardware capability, feature, and workaround flags */
+#define FLAG_HAS_AMT (1 << 0)
+#define FLAG_HAS_FLASH (1 << 1)
+#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
+#define FLAG_HAS_WOL (1 << 3)
+/* reserved bit4 */
+#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
+#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
+#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
+#define FLAG_READ_ONLY_NVM (1 << 8)
+#define FLAG_IS_ICH (1 << 9)
+#define FLAG_HAS_MSIX (1 << 10)
+#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
+#define FLAG_IS_QUAD_PORT_A (1 << 12)
+#define FLAG_IS_QUAD_PORT (1 << 13)
+#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
+#define FLAG_APME_IN_WUC (1 << 15)
+#define FLAG_APME_IN_CTRL3 (1 << 16)
+#define FLAG_APME_CHECK_PORT_B (1 << 17)
+#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
+#define FLAG_NO_WAKE_UCAST (1 << 19)
+#define FLAG_MNG_PT_ENABLED (1 << 20)
+#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
+#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
+#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
+#define FLAG_RX_NEEDS_RESTART (1 << 24)
+#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
+#define FLAG_SMART_POWER_DOWN (1 << 26)
+#define FLAG_MSI_ENABLED (1 << 27)
+/* reserved (1 << 28) */
+#define FLAG_TSO_FORCE (1 << 29)
+#define FLAG_RESTART_NOW (1 << 30)
+#define FLAG_MSI_TEST_FAILED (1 << 31)
+
+#define FLAG2_CRC_STRIPPING (1 << 0)
+#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
+#define FLAG2_HAS_PHY_STATS (1 << 4)
+#define FLAG2_HAS_EEE (1 << 5)
+#define FLAG2_DMA_BURST (1 << 6)
+#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
+#define FLAG2_DISABLE_AIM (1 << 8)
+#define FLAG2_CHECK_PHY_HANG (1 << 9)
+#define FLAG2_NO_DISABLE_RX (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
+#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
+#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
+
+#define E1000_RX_DESC_PS(R, i) \
+ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
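+
+/* Illustrative sketch (not part of the original file): fetching descriptor i
+ * from a ring with the accessors above:
+ *
+ *	struct e1000_tx_desc *txd = E1000_TX_DESC(*tx_ring, i);
+ *	union e1000_rx_desc_extended *rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ */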
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_ACCESS_SHARED_RESOURCE,
+ __E1000_DOWN
+};
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+extern char e1000e_driver_name[];
+extern const char e1000e_driver_version[];
+
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter, bool reset);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+
+extern unsigned int copybreak;
+
+extern const struct e1000_info e1000_82571_info;
+extern const struct e1000_info e1000_82572_info;
+extern const struct e1000_info e1000_82573_info;
+extern const struct e1000_info e1000_82574_info;
+extern const struct e1000_info e1000_82583_info;
+extern const struct e1000_info e1000_ich8_info;
+extern const struct e1000_info e1000_ich9_info;
+extern const struct e1000_info e1000_ich10_info;
+extern const struct e1000_info e1000_pch_info;
+extern const struct e1000_info e1000_pch2_info;
+extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_pch_spt_info;
+extern const struct e1000_info e1000_es2_info;
+
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
+
+static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ return hw->phy.ops.reset(hw);
+}
+
+static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return hw->phy.ops.read_reg(hw, offset, data);
+}
+
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
+static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return hw->phy.ops.write_reg(hw, offset, data);
+}
+
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ return hw->nvm.ops.validate(hw);
+}
+
+static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
+{
+ return hw->nvm.ops.update(hw);
+}
+
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return hw->nvm.ops.read(hw, offset, words, data);
+}
+
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return hw->nvm.ops.write(hw, offset, words, data);
+}
+
+static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ return hw->phy.ops.get_info(hw);
+}
+
+static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+#define er32(reg) __er32(hw, E1000_##reg)
+
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
+
+#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
+
+#define e1e_flush() er32(STATUS)
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
+ (__ew32((a), (reg + ((offset) << 2)), (value)))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) \
+ (readl((a)->hw_addr + reg + ((offset) << 2)))
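+
+/* Illustrative sketch (not part of the original file): a hypothetical helper
+ * showing the read-modify-write pattern with the accessors above; the SLU
+ * (Set Link Up) bit comes from defines.h.
+ */
+static inline void e1000e_example_force_link(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL); /* read Device Control via the readl() wrapper */
+
+ ew32(CTRL, ctrl | E1000_CTRL_SLU); /* force link up */
+ e1e_flush(); /* a STATUS read posts the prior write */
+}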
+
+#endif /* _E1000_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c b/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c
new file mode 100644
index 000000000..11f486e4f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -0,0 +1,2322 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* ethtool support for e1000 */
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+
+#include "e1000.h"
+
+enum { NETDEV_STATS, E1000_STATS };
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(str, m) { \
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
+#define E1000_NETDEV_STAT(str, m) { \
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ E1000_STAT("rx_packets", stats.gprc),
+ E1000_STAT("tx_packets", stats.gptc),
+ E1000_STAT("rx_bytes", stats.gorc),
+ E1000_STAT("tx_bytes", stats.gotc),
+ E1000_STAT("rx_broadcast", stats.bprc),
+ E1000_STAT("tx_broadcast", stats.bptc),
+ E1000_STAT("rx_multicast", stats.mprc),
+ E1000_STAT("tx_multicast", stats.mptc),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
+ E1000_STAT("multicast", stats.mprc),
+ E1000_STAT("collisions", stats.colc),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
+ E1000_STAT("rx_crc_errors", stats.crcerrs),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
+ E1000_STAT("rx_no_buffer_count", stats.rnbc),
+ E1000_STAT("rx_missed_errors", stats.mpc),
+ E1000_STAT("tx_aborted_errors", stats.ecol),
+ E1000_STAT("tx_carrier_errors", stats.tncrs),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
+ E1000_STAT("tx_window_errors", stats.latecol),
+ E1000_STAT("tx_abort_late_coll", stats.latecol),
+ E1000_STAT("tx_deferred_ok", stats.dc),
+ E1000_STAT("tx_single_coll_ok", stats.scc),
+ E1000_STAT("tx_multi_coll_ok", stats.mcc),
+ E1000_STAT("tx_timeout_count", tx_timeout_count),
+ E1000_STAT("tx_restart_queue", restart_queue),
+ E1000_STAT("rx_long_length_errors", stats.roc),
+ E1000_STAT("rx_short_length_errors", stats.ruc),
+ E1000_STAT("rx_align_errors", stats.algnerrc),
+ E1000_STAT("tx_tcp_seg_good", stats.tsctc),
+ E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ E1000_STAT("rx_flow_control_xon", stats.xonrxc),
+ E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ E1000_STAT("tx_flow_control_xon", stats.xontxc),
+ E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
+ E1000_STAT("rx_csum_offload_good", hw_csum_good),
+ E1000_STAT("rx_csum_offload_errors", hw_csum_err),
+ E1000_STAT("rx_header_split", rx_hdr_split),
+ E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ E1000_STAT("tx_smbus", stats.mgptc),
+ E1000_STAT("rx_smbus", stats.mgprc),
+ E1000_STAT("dropped_smbus", stats.mgpdc),
+ E1000_STAT("rx_dma_failed", rx_dma_failed),
+ E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+ E1000_STAT("corr_ecc_errors", corr_errors),
+ E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+};
+
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 speed;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->phy.type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
+
+ if (netif_running(netdev)) {
+ if (netif_carrier_ok(netdev)) {
+ speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex - 1;
+ }
+ } else if (!pm_runtime_suspended(netdev->dev.parent)) {
+ u32 status = er32(STATUS);
+
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
+ }
+
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI => 1; Invalid => 0 */
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ netif_carrier_ok(netdev))
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
+ return 0;
+}
+
+static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work
+ */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ /* Fiber NICs only allow 1000 Mbps full duplex */
+ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
+ (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
+ goto err_inval;
+ }
+
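+ /* e.g. SPEED_100 + DUPLEX_FULL = 101 is distinct from every other
+ * valid combination below, because no supported speed has its lsb
+ * set and dplx is at most 1
+ */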
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
+ return 0;
+
+err_inval:
+ e_err("Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val = 0;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed
+ */
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
+ e_err("Cannot change link characteristics when SoL/IDER is active.\n");
+ ret_val = -EINVAL;
+ goto out;
+ }
+
+ /* MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ ret_val = -EINVAL;
+ goto out;
+ }
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ if (hw->phy.media_type == e1000_media_type_fiber)
+ hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE | ADVERTISED_Autoneg;
+ else
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP | ADVERTISED_Autoneg;
+ ecmd->advertising = hw->phy.autoneg_advertised;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
+ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ ret_val = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter, true);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+
+out:
+ pm_runtime_put_sync(netdev->dev.parent);
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return ret_val;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == e1000_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == e1000_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = e1000_fc_default;
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter, true);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000e_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000e_set_fc_watermarks(hw);
+ }
+ }
+
+out:
+ pm_runtime_put_sync(netdev->dev.parent);
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return retval;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device __always_unused *netdev)
+{
+#define E1000_REGS_LEN 32 /* overestimate */
+ return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN(0));
+ regs_buff[4] = er32(RDH(0));
+ regs_buff[5] = er32(RDT(0));
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN(0));
+ regs_buff[9] = er32(TDH(0));
+ regs_buff[10] = er32(TDT(0));
+ regs_buff[11] = er32(TIDV);
+
+ regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
+
+ /* ethtool doesn't use anything past this point, so all this
+ * code is likely legacy junk for apps that may or may not exist
+ */
+ if (hw->phy.type == e1000_phy_m88) {
+ e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = 0; /* was idle_errors */
+ e1e_rphy(hw, MII_STAT1000, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+
+ pm_runtime_put_sync(netdev->dev.parent);
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->hw.nvm.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
+
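+	/* ethtool works in bytes while the NVM is addressed in 16-bit words,
+	 * so convert the byte range to an inclusive word range; e.g.
+	 * offset=3, len=4 covers bytes 3-6 and therefore words 1-3.
+	 */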
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ ret_val = e1000_read_nvm(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ } else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_nvm(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ if (ret_val) {
+ /* a read error occurred, throw away the result */
+ memset(eeprom_buff, 0xff, sizeof(u16) *
+ (last_word - first_word + 1));
+ } else {
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic !=
+ (adapter->pdev->vendor | (adapter->pdev->device << 16)))
+ return -EFAULT;
+
+ if (adapter->flags & FLAG_READ_ONLY_NVM)
+ return -EINVAL;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
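+	/* The NVM is word addressable, so a write that starts or ends on an
+	 * odd byte needs a read/modify/write of the boundary word; note ptr
+	 * is bumped one byte below so the caller's data lands after the
+	 * preserved low byte of the first word.
+	 */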
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (!ret_val))
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+
+ if (ret_val)
+ goto out;
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ cpu_to_le16s(&eeprom_buff[i]);
+
+ ret_val = e1000_write_nvm(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ if (ret_val)
+ goto out;
+
+ /* Update the checksum over the first part of the EEPROM if needed
+ * and flush shadow RAM for applicable controllers
+ */
+ if ((first_word <= NVM_CHECKSUM_REG) ||
+ (hw->mac.type == e1000_82583) ||
+ (hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82573))
+ ret_val = e1000e_update_nvm_checksum(hw);
+
+out:
+ pm_runtime_put_sync(netdev->dev.parent);
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version));
+
+ /* EEPROM image version # is reported as firmware version # for
+ * PCI-E controllers
+ */
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
+ (adapter->eeprom_vers & 0xF000) >> 12,
+ (adapter->eeprom_vers & 0x0FF0) >> 4,
+ (adapter->eeprom_vers & 0x000F));
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = E1000_MAX_RXD;
+ ring->tx_max_pending = E1000_MAX_TXD;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
+ int err = 0, size = sizeof(struct e1000_ring);
+ bool set_tx = false, set_rx = false;
+ u16 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
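+	/* Clamp the requested counts to the supported range, then round up
+	 * to the required descriptor multiple; e.g. with the usual multiple
+	 * of 8, a request for 100 Rx descriptors becomes 104.
+	 */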
+ new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD,
+ E1000_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD,
+ E1000_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
+ /* nothing to do */
+ return 0;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(adapter->netdev)) {
+ /* Set counts now and allocate resources during open() */
+ adapter->tx_ring->count = new_tx_count;
+ adapter->rx_ring->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ set_tx = (new_tx_count != adapter->tx_ring_count);
+ set_rx = (new_rx_count != adapter->rx_ring_count);
+
+ /* Allocate temporary storage for ring updates */
+ if (set_tx) {
+ temp_tx = vmalloc(size);
+ if (!temp_tx) {
+ err = -ENOMEM;
+ goto free_temp;
+ }
+ }
+ if (set_rx) {
+ temp_rx = vmalloc(size);
+ if (!temp_rx) {
+ err = -ENOMEM;
+ goto free_temp;
+ }
+ }
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ e1000e_down(adapter, true);
+
+ /* We can't just free everything and then setup again, because the
+ * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
+ * structs. First, attempt to allocate new resources...
+ */
+ if (set_tx) {
+ memcpy(temp_tx, adapter->tx_ring, size);
+ temp_tx->count = new_tx_count;
+ err = e1000e_setup_tx_resources(temp_tx);
+ if (err)
+ goto err_setup;
+ }
+ if (set_rx) {
+ memcpy(temp_rx, adapter->rx_ring, size);
+ temp_rx->count = new_rx_count;
+ err = e1000e_setup_rx_resources(temp_rx);
+ if (err)
+ goto err_setup_rx;
+ }
+
+ /* ...then free the old resources and copy back any new ring data */
+ if (set_tx) {
+ e1000e_free_tx_resources(adapter->tx_ring);
+ memcpy(adapter->tx_ring, temp_tx, size);
+ adapter->tx_ring_count = new_tx_count;
+ }
+ if (set_rx) {
+ e1000e_free_rx_resources(adapter->rx_ring);
+ memcpy(adapter->rx_ring, temp_rx, size);
+ adapter->rx_ring_count = new_rx_count;
+ }
+
+err_setup_rx:
+ if (err && set_tx)
+ e1000e_free_tx_resources(temp_tx);
+err_setup:
+ e1000e_up(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+free_temp:
+ vfree(temp_tx);
+ vfree(temp_rx);
+clear_reset:
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return err;
+}
+
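+/* Register diagnostic helpers: write each test pattern through the
+ * writable-bit mask, read it back, and compare against the pattern
+ * filtered by the readable-bit mask; on mismatch the failing register
+ * offset is stored in *data and the test aborts.
+ */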
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+ int reg, int offset, u32 mask, u32 write)
+{
+ u32 pat, val;
+ static const u32 test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
+ for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
+ E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
+ (test[pat] & write));
+ val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+ if (val != (test[pat] & write & mask)) {
+ e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
+ reg + (offset << 2), val,
+ (test[pat] & write & mask));
+ *data = reg;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 val;
+
+ __ew32(&adapter->hw, reg, write & mask);
+ val = __er32(&adapter->hw, reg);
+ if ((write & mask) != (val & mask)) {
+ e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
+ *data = reg;
+ return true;
+ }
+ return false;
+}
+
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+ return 1; \
+ } while (0)
+#define REG_PATTERN_TEST(reg, mask, write) \
+ REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ u32 value;
+ u32 before;
+ u32 after;
+ u32 i;
+ u32 toggle;
+ u32 mask;
+ u32 wlock_mac = 0;
+
+ /* The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored. There are several bits
+ * on newer hardware that are r/w.
+ */
+ switch (mac->type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ toggle = 0x7FFFF3FF;
+ break;
+ default:
+ toggle = 0x7FFFF033;
+ break;
+ }
+
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
+ if (value != after) {
+ e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ ew32(STATUS, before);
+
+ if (!(adapter->flags & FLAG_IS_ICH)) {
+ REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
+
+ REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
+
+ before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
+ REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
+
+ REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
+ if (!(adapter->flags & FLAG_IS_ICH))
+ REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
+ mask = 0x8003FFFF;
+ switch (mac->type) {
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ mask |= (1 << 18);
+ break;
+ default:
+ break;
+ }
+
+ if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
+ wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
+ E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ for (i = 0; i < mac->rar_entry_count; i++) {
+ if ((mac->type == e1000_pch_lpt) ||
+ (mac->type == e1000_pch_spt)) {
+ /* Cannot test write-protected SHRAL[n] registers */
+ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
+ continue;
+
+ /* SHRAH[9] different than the others */
+ if (i == 10)
+ mask |= (1 << 30);
+ else
+ mask &= ~(1 << 30);
+ }
+ if (mac->type == e1000_pch2lan) {
+ /* SHRAH[0,1,2] different than previous */
+ if (i == 1)
+ mask &= 0xFFF4FFFF;
+ /* SHRAH[3] different than SHRAH[0,1,2] */
+ if (i == 4)
+ mask |= (1 << 30);
+ /* RAR[1-6] owned by management engine - skipping */
+ if (i > 0)
+ i += 6;
+ }
+
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
+ 0xFFFFFFFF);
+ /* reset index to actual value */
+ if ((mac->type == e1000_pch2lan) && (i > 6))
+ i -= 6;
+ }
+
+ for (i = 0; i < mac->mta_reg_count; i++)
+ REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+
+ return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ return *data;
+ }
+ checksum += temp;
+ }
+
+ /* If Checksum is not Correct return error else test passed */
+ if ((checksum != (u16)NVM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= er32(ICR);
+
+ return IRQ_HANDLED;
+}
+
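+/* Interrupt diagnostic: walk the low ten ICR bits and verify that a masked
+ * cause is not posted, that an unmasked cause is posted, and, when the IRQ
+ * line is not shared, that forcing all the other causes posts nothing.
+ */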
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mask;
+ u32 shared_int = 1;
+ u32 irq = adapter->pdev->irq;
+ int i;
+ int ret_val = 0;
+ int int_mode = E1000E_INT_MODE_LEGACY;
+
+ *data = 0;
+
+ /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
+ if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
+ int_mode = adapter->int_mode;
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e1000e_set_interrupt_capability(adapter);
+ }
+ /* Hook up test interrupt handler just for this test */
+ if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev)) {
+ shared_int = 0;
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
+ netdev)) {
+ *data = 1;
+ ret_val = -1;
+ goto out;
+ }
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ /* Test each interrupt */
+ for (i = 0; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ switch (mask) {
+ case E1000_ICR_RXSEQ:
+ continue;
+ case 0x00000100:
+ if (adapter->hw.mac.type == e1000_ich8lan ||
+ adapter->hw.mac.type == e1000_ich9lan)
+ continue;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!shared_int) {
+			/* Disable the interrupt so it cannot be reported
+			 * in the cause register, then force that same
+			 * interrupt and check whether one gets posted.  If
+			 * an interrupt was posted to the bus, the test
+			 * failed.
+			 */
+ adapter->test_icr = 0;
+ ew32(IMC, mask);
+ ew32(ICS, mask);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+		/* Enable the interrupt so it will be reported in the
+		 * cause register, then force that same interrupt and
+		 * check whether one gets posted.  If no interrupt was
+		 * posted to the bus, the test failed.
+		 */
+ adapter->test_icr = 0;
+ ew32(IMS, mask);
+ ew32(ICS, mask);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+			/* Mask all interrupts except the one under test,
+			 * then force all the other interrupts and check
+			 * that none get posted.  If an interrupt was
+			 * posted to the bus, the test failed.
+			 */
+ adapter->test_icr = 0;
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+out:
+ if (int_mode == E1000E_INT_MODE_MSIX) {
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = int_mode;
+ e1000e_set_interrupt_capability(adapter);
+ }
+
+ return ret_val;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
+ int i;
+
+ if (tx_ring->desc && tx_ring->buffer_info) {
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
+ dma_unmap_single(&pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
+ }
+ }
+
+ if (rx_ring->desc && rx_ring->buffer_info) {
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
+ dma_unmap_single(&pdev->dev,
+ buffer_info->dma,
+ 2048, DMA_FROM_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
+ }
+ }
+
+ if (tx_ring->desc) {
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+ if (rx_ring->desc) {
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+
+ kfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ kfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+}
+
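+/* Build private Tx/Rx descriptor rings for the loopback test and point the
+ * hardware at them; the return value doubles as a step number (1-8)
+ * identifying which allocation or DMA mapping failed.
+ */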
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ int i;
+ int ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!tx_ring->count)
+ tx_ring->count = E1000_DEFAULT_TXD;
+
+ tx_ring->buffer_info = kcalloc(tx_ring->count,
+ sizeof(struct e1000_buffer), GFP_KERNEL);
+ if (!tx_ring->buffer_info) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
+ ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH(0), 0);
+ ew32(TDT(0), 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for (i = 0; i < tx_ring->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+ struct sk_buff *skb;
+ unsigned int skb_size = 1024;
+
+ skb = alloc_skb(skb_size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, skb_size);
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].length = skb->len;
+ tx_ring->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+ tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ if (!rx_ring->count)
+ rx_ring->count = E1000_DEFAULT_RXD;
+
+ rx_ring->buffer_info = kcalloc(rx_ring->count,
+ sizeof(struct e1000_buffer), GFP_KERNEL);
+ if (!rx_ring->buffer_info) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+
+ rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_clean = 0;
+
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
+ ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
+ ew32(RDLEN(0), rx_ring->size);
+ ew32(RDH(0), 0);
+ ew32(RDT(0), 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
+
+ for (i = 0; i < rx_ring->count; i++) {
+ union e1000_rx_desc_extended *rx_desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 7;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_ring->buffer_info[i].skb = skb;
+ rx_ring->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
+ ret_val = 8;
+ goto err_nomem;
+ }
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr =
+ cpu_to_le64(rx_ring->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1e_wphy(&adapter->hw, 29, 0x001F);
+ e1e_wphy(&adapter->hw, 30, 0x8FFC);
+ e1e_wphy(&adapter->hw, 29, 0x001A);
+ e1e_wphy(&adapter->hw, 30, 0x8FF0);
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+ u16 phy_reg = 0;
+ s32 ret_val = 0;
+
+ hw->mac.autoneg = 0;
+
+ if (hw->phy.type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1e_wphy(hw, MII_BMCR, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+ e1e_flush();
+ usleep_range(500, 1000);
+
+ return 0;
+ }
+
+ /* Specific PHY configuration for loopback */
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ /* Auto-MDI/MDIX Off */
+ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1e_wphy(hw, MII_BMCR, 0x9140);
+ /* autoneg off */
+ e1e_wphy(hw, MII_BMCR, 0x8140);
+ break;
+ case e1000_phy_gg82563:
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
+ break;
+ case e1000_phy_bm:
+ /* Set Default MAC Interface speed to 1GB */
+ e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
+ phy_reg &= ~0x0007;
+ phy_reg |= 0x006;
+ e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
+ /* Assert SW reset for above settings to take effect */
+ hw->phy.ops.commit(hw);
+ usleep_range(1000, 2000);
+ /* Force Full Duplex */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
+ /* Set Link Up (in force link) */
+ e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
+ /* Force Link */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
+ /* Set Early Link Enable */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82578:
+ /* Workaround: K1 must be disabled for stable 1Gbps operation */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Cannot setup 1Gbps loopback.\n");
+ return ret_val;
+ }
+ e1000_configure_k1_ich8lan(hw, false);
+ hw->phy.ops.release(hw);
+ break;
+ case e1000_phy_82579:
+ /* Disable PHY energy detect power down */
+ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ /* Disable full chip energy detect */
+ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
+ /* Enable loopback on the PHY */
+ e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
+ break;
+ default:
+ break;
+ }
+
+ /* force 1000, set loopback */
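+	/* 0x4140 = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000 */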
+ e1e_wphy(hw, MII_BMCR, 0x4140);
+ msleep(250);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->flags & FLAG_IS_ICH)
+ ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
+
+ if (hw->phy.media_type == e1000_media_type_copper &&
+ hw->phy.type == e1000_phy_m88) {
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ } else {
+		/* Set the ILOS bit on the fiber NIC if a half-duplex link
+		 * is detected.
+		 */
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ ew32(CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so that it does not begin to
+	 * autonegotiate when a cable is plugged back into the NIC.
+	 */
+ if (hw->phy.type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl = er32(CTRL);
+ int link;
+
+ /* special requirements for 82571/82572 fiber adapters */
+
+	/* jump through hoops to make sure the link reports up, because
+	 * the serdes link is hardwired up
+	 */
+ ctrl |= E1000_CTRL_SLU;
+ ew32(CTRL, ctrl);
+
+ /* disable autoneg */
+ ctrl = er32(TXCW);
+ ctrl &= ~(1 << 31);
+ ew32(TXCW, ctrl);
+
+ link = (er32(STATUS) & E1000_STATUS_LU);
+
+ if (!link) {
+ /* set invert loss of signal */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_ILOS;
+ ew32(CTRL, ctrl);
+ }
+
+ /* special write to serdes control register to enable SerDes analog
+ * loopback
+ */
+ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/* only call this for fiber/serdes connections to es2lan */
+static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrlext = er32(CTRL_EXT);
+ u32 ctrl = er32(CTRL);
+
+	/* save CTRL_EXT to restore later, reusing tx_fifo_head as scratch
+	 * space since it is unused on mac_type 80003es2lan
+	 */
+ adapter->tx_fifo_head = ctrlext;
+
+ /* clear the serdes mode bits, putting the device into mac loopback */
+ ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ ew32(CTRL_EXT, ctrlext);
+
+ /* force speed to 1000/FD, link up */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+ E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* set mac loopback */
+ ctrl = er32(RCTL);
+ ctrl |= E1000_RCTL_LBM_MAC;
+ ew32(RCTL, ctrl);
+
+ /* set testing mode parameters (no need to reset later) */
+#define KMRNCTRLSTA_OPMODE (0x1F << 16)
+#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
+ ew32(KMRNCTRLSTA,
+ (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
+
+ return 0;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+ switch (hw->mac.type) {
+ case e1000_80003es2lan:
+ return e1000_set_es2lan_mac_loopback(adapter);
+ case e1000_82571:
+ case e1000_82572:
+ return e1000_set_82571_fiber_loopback(adapter);
+ default:
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ ew32(RCTL, rctl);
+ return 0;
+ }
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ return e1000_integrated_phy_loopback(adapter);
+ }
+
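+	/* unsupported media type: return a non-zero diagnostic code */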
+ return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ ew32(RCTL, rctl);
+
+ switch (hw->mac.type) {
+ case e1000_80003es2lan:
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+			/* restore CTRL_EXT, which was stashed in tx_fifo_head */
+ ew32(CTRL_EXT, adapter->tx_fifo_head);
+ adapter->tx_fifo_head = 0;
+ }
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ e1e_flush();
+ usleep_range(10000, 20000);
+ break;
+ }
+		/* fall through */
+ default:
+ hw->mac.autoneg = 1;
+ if (hw->phy.type == e1000_phy_gg82563)
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
+ e1e_rphy(hw, MII_BMCR, &phy_reg);
+ if (phy_reg & BMCR_LOOPBACK) {
+ phy_reg &= ~BMCR_LOOPBACK;
+ e1e_wphy(hw, MII_BMCR, phy_reg);
+ if (hw->phy.ops.commit)
+ hw->phy.ops.commit(hw);
+ }
+ break;
+ }
+}
+
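+/* The test frame is 0xFF in its first half and mostly 0xAA in its second,
+ * with 0xBE/0xAF marker bytes at fixed offsets; the checker below only
+ * needs to see the 0xFF fill and both markers to call a frame good.
+ */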
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF)
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ return 0;
+ return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ int i, j, k, l;
+ int lc;
+ int good_cnt;
+ int ret_val = 0;
+ unsigned long time;
+
+ ew32(RDT(0), rx_ring->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring.
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop.
+	 */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
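+	/* e.g. with 256-entry rings: lc = (256 / 64) * 2 + 1 = 9 passes of
+	 * 64 frames each, wrapping the larger ring a little over twice
+	 */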
+
+ k = 0;
+ l = 0;
+ /* loop count loop */
+ for (j = 0; j <= lc; j++) {
+ /* send the packets */
+ for (i = 0; i < 64; i++) {
+ buffer_info = &tx_ring->buffer_info[k];
+
+ e1000_create_lbtest_frame(buffer_info->skb, 1024);
+ dma_sync_single_for_device(&pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ k++;
+ if (k == tx_ring->count)
+ k = 0;
+ }
+ ew32(TDT(0), k);
+ e1e_flush();
+ msleep(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ /* receive the sent packets */
+ do {
+ buffer_info = &rx_ring->buffer_info[l];
+
+ dma_sync_single_for_cpu(&pdev->dev,
+ buffer_info->dma, 2048,
+ DMA_FROM_DEVICE);
+
+ ret_val = e1000_check_lbtest_frame(buffer_info->skb,
+ 1024);
+ if (!ret_val)
+ good_cnt++;
+ l++;
+ if (l == rx_ring->count)
+ l = 0;
+			/* time + 20 jiffies (20 msecs at HZ=1000; 200 msecs
+			 * on 2.4 kernels) is more than enough time to
+			 * complete the receives; if it is exceeded, break
+			 * out and report an error
+			 */
+ } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
+ if (good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if (time_after(jiffies, time + 20)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ }
+ return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* PHY loopback cannot be performed if SoL/IDER sessions are active */
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
+ e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+
+err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ *data = 0;
+ if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+
+ hw->mac.serdes_has_link = false;
+
+ /* On some blade server designs, link establishment
+ * could take as long as 2-3 minutes
+ */
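+		/* (the loop below polls up to 3750 times with a 20 ms
+		 * sleep, i.e. at least ~75 seconds)
+		 */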
+ do {
+ hw->mac.ops.check_for_link(hw);
+ if (hw->mac.serdes_has_link)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ hw->mac.ops.check_for_link(hw);
+ if (hw->mac.autoneg)
+ /* On some Phy/switch combinations, link establishment
+ * can take a few seconds more than expected.
+ */
+ msleep_interruptible(5000);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU))
+ *data = 1;
+ }
+ return *data;
+}
+
+static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E1000_TEST_LEN;
+ case ETH_SS_STATS:
+ return E1000_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
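+/* ethtool self-test entry point: the offline variant closes the interface,
+ * runs the register, EEPROM, interrupt, loopback and link tests with a
+ * hardware reset between them, then restores the saved link settings; the
+ * online variant only checks link so traffic is not disturbed.
+ */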
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u16 autoneg_advertised;
+ u8 forced_speed_duplex;
+ u8 autoneg;
+ bool if_running = netif_running(netdev);
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ set_bit(__E1000_TESTING, &adapter->state);
+
+ if (!if_running) {
+ /* Get control of and reset hardware */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+ }
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+ autoneg = adapter->hw.mac.autoneg;
+
+ e_info("offline testing starting\n");
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+
+ if (e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* force this routine to wait until autoneg completes or times out */
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+ adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.mac.autoneg = autoneg;
+ e1000e_reset(adapter);
+
+ clear_bit(__E1000_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ /* Online tests */
+
+ e_info("online testing starting\n");
+
+ /* register, eeprom, intr and loopback tests not run online */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__E1000_TESTING, &adapter->state);
+ }
+
+ if (!if_running) {
+ e1000e_reset(adapter);
+
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_release_hw_control(adapter);
+ }
+
+ msleep_interruptible(4 * 1000);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
+
+ /* apply any specific unsupported masks here */
+ if (adapter->flags & FLAG_NO_WAKE_UCAST) {
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ e_err("Interface does not support directed (unicast) frame wake-up packets\n");
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev) ||
+ (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+ WAKE_MAGIC | WAKE_PHY)))
+ return -EOPNOTSUPP;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int e1000_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ if (!hw->mac.ops.blink_led)
+ return 2; /* cycle on/off twice per second */
+
+ hw->mac.ops.blink_led(hw);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (hw->phy.type == e1000_phy_ife)
+ e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ hw->mac.ops.led_off(hw);
+ hw->mac.ops.cleanup_led(hw);
+ pm_runtime_put_sync(netdev->dev.parent);
+ break;
+
+ case ETHTOOL_ID_ON:
+ hw->mac.ops.led_on(hw);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ hw->mac.ops.led_off(hw);
+ break;
+ }
+
+ return 0;
+}
+
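+/* adapter->itr_setting is an encoded value: 0 disables interrupt
+ * throttling, 1 and 3 select the dynamic modes, 4 is the simplified mode,
+ * and anything larger is an interrupt rate in irqs/sec, which ethtool
+ * reports as 1000000/rate microseconds.
+ */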
+static int e1000_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->itr_setting <= 4)
+ ec->rx_coalesce_usecs = adapter->itr_setting;
+ else
+ ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+
+ return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 4) &&
+ (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr_setting = 4;
+ adapter->itr = adapter->itr_setting;
+ } else if (ec->rx_coalesce_usecs <= 3) {
+ adapter->itr = 20000;
+ adapter->itr_setting = ec->rx_coalesce_usecs;
+ } else {
+ adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+ adapter->itr_setting = adapter->itr & ~3;
+ }
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ if (adapter->itr_setting != 0)
+ e1000e_write_itr(adapter, adapter->itr);
+ else
+ e1000e_write_itr(adapter, 0);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (!adapter->hw.mac.autoneg)
+ return -EINVAL;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+ e1000e_reinit_locked(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
+ int i;
+ char *p = NULL;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ e1000e_get_stats64(netdev, &net_stats);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ switch (e1000_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *)&net_stats +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ case E1000_STATS:
+ p = (char *)adapter +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
+
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static void e1000_get_strings(struct net_device __always_unused *netdev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int e1000_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info,
+ u32 __always_unused *rule_locs)
+{
+ info->data = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH: {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+ mrqc = er32(MRQC);
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
+ return 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
+ u32 ret_val;
+
+ if (!(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ cap_addr = I82579_EEE_CAPABILITY;
+ lpa_addr = I82579_EEE_LP_ABILITY;
+ pcs_stat_addr = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ cap_addr = I217_EEE_CAPABILITY;
+ lpa_addr = I217_EEE_LP_ABILITY;
+ pcs_stat_addr = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ pm_runtime_put_sync(netdev->dev.parent);
+ return -EBUSY;
+ }
+
+ /* EEE Capability */
+ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+
+ /* EEE Advertised */
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* EEE Link Partner Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE PCS Status */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ if (hw->phy.type == e1000_phy_82579)
+ phy_data <<= 8;
+
+	/* Result of the EEE auto-negotiation: there is no register that
+	 * reports the status of the EEE negotiation, so make a best guess
+	 * based on whether Tx or Rx LPI indications have been received.
+	 */
+ if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
+ edata->tx_lpi_enabled = true;
+ edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
+
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ ret_val = -ENODATA;
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return ret_val;
+}
+
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ ret_val = e1000e_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ e_err("Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) {
+ e_err("Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ /* reset the link */
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return 0;
+}
+
+static int e1000e_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return 0;
+
+ info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+
+ return 0;
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000e_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
+ .get_rxnfc = e1000_get_rxnfc,
+ .get_ts_info = e1000e_get_ts_info,
+ .get_eee = e1000e_get_eee,
+ .set_eee = e1000e_set_eee,
+};
+
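+/* Attach the ethtool operations vector to the netdev; called once when the
+ * driver probes the device.
+ */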
+void e1000e_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &e1000_ethtool_ops;
+}
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/hw.h b/kernel/drivers/net/ethernet/intel/e1000e/hw.h
new file mode 100644
index 000000000..19e8c487d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/hw.h
@@ -0,0 +1,698 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "regs.h"
+#include "defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
+#define E1000_DEV_ID_ICH9_BM 0x10E5
+#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
+#define E1000_DEV_ID_ICH9_IGP_C 0x294C
+#define E1000_DEV_ID_ICH9_IFE 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */
+
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_1 1
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
+enum e1000_mac_type {
+ e1000_82571,
+ e1000_82572,
+ e1000_82573,
+ e1000_82574,
+ e1000_82583,
+ e1000_80003es2lan,
+ e1000_ich8lan,
+ e1000_ich9lan,
+ e1000_ich10lan,
+ e1000_pchlan,
+ e1000_pch2lan,
+ e1000_pch_lpt,
+ e1000_pch_spt,
+};
+
+enum e1000_media_type {
+ e1000_media_type_unknown = 0,
+ e1000_media_type_copper = 1,
+ e1000_media_type_fiber = 2,
+ e1000_media_type_internal_serdes = 3,
+ e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+ e1000_nvm_unknown = 0,
+ e1000_nvm_none,
+ e1000_nvm_eeprom_spi,
+ e1000_nvm_flash_hw,
+ e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+ e1000_nvm_override_none = 0,
+ e1000_nvm_override_spi_small,
+ e1000_nvm_override_spi_large
+};
+
+enum e1000_phy_type {
+ e1000_phy_unknown = 0,
+ e1000_phy_none,
+ e1000_phy_m88,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_bm,
+ e1000_phy_82578,
+ e1000_phy_82577,
+ e1000_phy_82579,
+ e1000_phy_i217,
+};
+
+enum e1000_bus_width {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_pcie_x1,
+ e1000_bus_width_pcie_x2,
+ e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full,
+ e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	__le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options;
+ u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH 252
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "mac.h"
+#include "phy.h"
+#include "nvm.h"
+#include "manage.h"
+
+/* Function pointers for the MAC. */
+struct e1000_mac_operations {
+ s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*cleanup_led)(struct e1000_hw *);
+ void (*clear_hw_cntrs)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*led_on)(struct e1000_hw *);
+ s32 (*led_off)(struct e1000_hw *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_led)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*config_collision_dist)(struct e1000_hw *);
+ int (*rar_set)(struct e1000_hw *, u8 *, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ u32 (*rar_get_count)(struct e1000_hw *);
+};
+
+/* When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct e1000_phy_operations {
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*cfg_on_link_up)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*commit)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
+};
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ void (*reload)(struct e1000_hw *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ enum e1000_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
+ u16 mta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+#define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool arc_subsystem_valid;
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+ bool in_ifs_mode;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+ enum e1000_serdes_link_state serdes_link_state;
+};
+
+struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+ enum e1000_1000t_rx_status remote_rx;
+ enum e1000_ms_type ms_type;
+ enum e1000_ms_type original_ms_type;
+ enum e1000_rev_polarity cable_polarity;
+ enum e1000_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum e1000_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+ struct e1000_nvm_operations ops;
+
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct e1000_bus_info {
+ enum e1000_bus_width width;
+
+ u16 func;
+};
+
+struct e1000_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* FC mode in effect */
+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_dev_spec_82571 {
+ bool laa_is_present;
+ u32 smb_counter;
+};
+
+struct e1000_dev_spec_80003es2lan {
+ bool mdic_wa_enable;
+};
+
+struct e1000_shadow_ram {
+ u16 value;
+ bool modified;
+};
+
+#define E1000_ICH8_SHADOW_RAM_WORDS 2048
+
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+ e1000_ulp_state_unknown,
+ e1000_ulp_state_off,
+ e1000_ulp_state_on,
+};
+
+struct e1000_dev_spec_ich8lan {
+ bool kmrn_lock_loss_workaround_enabled;
+ struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+ bool nvm_k1_enabled;
+ bool eee_disable;
+ u16 eee_lp_ability;
+ enum e1000_ulp_state ulp_state;
+};
+
+struct e1000_hw {
+ struct e1000_adapter *adapter;
+
+ void __iomem *hw_addr;
+ void __iomem *flash_address;
+
+ struct e1000_mac_info mac;
+ struct e1000_fc_info fc;
+ struct e1000_phy_info phy;
+ struct e1000_nvm_info nvm;
+ struct e1000_bus_info bus;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+ struct e1000_dev_spec_82571 e82571;
+ struct e1000_dev_spec_80003es2lan e80003es2lan;
+ struct e1000_dev_spec_ich8lan ich8lan;
+ } dev_spec;
+};
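+
+/* Illustrative sketch, not driver code: the locked/page access pattern
+ * described in the comment table above struct e1000_phy_operations. For
+ * several registers on the same page, take the lock once (L), set the page
+ * once (P), then use the _page accessors (A). The register offsets 17 and
+ * 18 are placeholders.
+ */
+static inline s32 example_phy_same_page_reads(struct e1000_hw *hw, u16 page,
+ u16 *reg17, u16 *reg18)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw); /* L: lock once */
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.set_page(hw, page); /* P: set page once */
+ if (!ret_val)
+ ret_val = hw->phy.ops.read_reg_page(hw, 17, reg17); /* A */
+ if (!ret_val)
+ ret_val = hw->phy.ops.read_reg_page(hw, 18, reg18); /* A */
+ hw->phy.ops.release(hw);
+ return ret_val;
+}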
+
+#include "82571.h"
+#include "80003es2lan.h"
+#include "ich8lan.h"
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c
new file mode 100644
index 000000000..9d81c0317
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -0,0 +1,5802 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* 82562G 10/100 Network Connection
+ * 82562G-2 10/100 Network Connection
+ * 82562GT 10/100 Network Connection
+ * 82562GT-2 10/100 Network Connection
+ * 82562V 10/100 Network Connection
+ * 82562V-2 10/100 Network Connection
+ * 82566DC-2 Gigabit Network Connection
+ * 82566DC Gigabit Network Connection
+ * 82566DM-2 Gigabit Network Connection
+ * 82566DM Gigabit Network Connection
+ * 82566MC Gigabit Network Connection
+ * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567V Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
+ */
+
+#include "e1000.h"
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */
+ u16 dael:1; /* bit 2 Direct Access error Log */
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */
+ u16 reserved1:2; /* bit 7:6 Reserved */
+ u16 reserved2:6; /* bit 13:8 Reserved */
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
+ } hsf_status;
+ u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+ u16 flcgo:1; /* 0 Flash Cycle Go */
+ u16 flcycle:2; /* 2:1 Flash Cycle */
+ u16 reserved:5; /* 7:3 Reserved */
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn:6; /* 15:10 Reserved */
+ } hsf_ctrl;
+ u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+ u32 grra:8; /* 0:7 GbE region Read Access */
+ u32 grwa:8; /* 8:15 GbE region Write Access */
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
+ } hsf_flregacc;
+ u32 regval;
+};
+
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+ struct ich8_pr {
+ u32 base:13; /* 0:12 Protected Range Base */
+ u32 reserved1:2; /* 13:14 Reserved */
+ u32 rpe:1; /* 15 Read Protection Enable */
+ u32 limit:13; /* 16:28 Protected Range Limit */
+ u32 reserved2:2; /* 29:30 Reserved */
+ u32 wpe:1; /* 31 Write Protection Enable */
+ } range;
+ u32 regval;
+};
+
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte);
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 *data);
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data);
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data);
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 *data);
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 data);
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword);
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
+
+static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
+{
+ return readw(hw->flash_address + reg);
+}
+
+static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
+{
+ return readl(hw->flash_address + reg);
+}
+
+static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
+{
+ writew(val, hw->flash_address + reg);
+}
+
+static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ writel(val, hw->flash_address + reg);
+}
+
+#define er16flash(reg) __er16flash(hw, (reg))
+#define er32flash(reg) __er32flash(hw, (reg))
+#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
+#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
+
+/**
+ * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
+ * @hw: pointer to the HW structure
+ *
+ * Test access to the PHY registers by reading the PHY ID registers. If
+ * the PHY ID is already known (e.g. on the resume path), compare it with the
+ * known ID; otherwise assume the PHY ID just read is correct if it is valid.
+ *
+ * Assumes the sw/fw/hw semaphore is already acquired.
+ **/
+static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
+{
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val = 0;
+ u16 retry_count;
+ u32 mac_reg = 0;
+
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
+
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
+
+ if (hw->phy.id) {
+ if (hw->phy.id == phy_id)
+ goto out;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
+ goto out;
+ }
+
+ /* In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ if (hw->mac.type < e1000_pch_lpt) {
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+ }
+
+ if (ret_val)
+ return false;
+out:
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ /* Unforce SMBus mode in PHY */
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
+ return true;
+}
+
+/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = er32(FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ ew32(FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = er32(CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+ usleep_range(10, 20);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msleep(50);
+ } else {
+ u16 count = 20;
+
+ do {
+ usleep_range(5000, 10000);
+ } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
+
+ msleep(30);
+ }
+}
+
+/**
+ * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
+ * @hw: pointer to the HW structure
+ *
+ * Workarounds/flow necessary for PHY initialization during driver load
+ * and resume paths.
+ **/
+static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->adapter;
+ u32 mac_reg, fwsm = er32(FWSM);
+ s32 ret_val;
+
+ /* Gate automatic PHY configuration by hardware on managed and
+ * non-managed 82579 and newer adapters.
+ */
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+ e1000_disable_ulp_lpt_lp(hw, true);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_dbg("Failed to initialize PHY flow\n");
+ goto out;
+ }
+
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
+ * inaccessible and resetting the PHY is not blocked, toggle the
+ * LANPHYPC Value bit to force the interconnect to PCIe mode.
+ */
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Before toggling LANPHYPC, see if PHY is accessible by
+ * forcing MAC to SMBus mode first.
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msleep(50);
+
+ /* fall-through */
+ case e1000_pch2lan:
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* fall-through */
+ case e1000_pchlan:
+ if ((hw->mac.type == e1000_pchlan) &&
+ (fwsm & E1000_ICH_FWSM_FW_VALID))
+ break;
+
+ if (hw->phy.ops.check_reset_block(hw)) {
+ e_dbg("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
+ }
+ break;
+ default:
+ break;
+ }
+
+ hw->phy.ops.release(hw);
+ if (!ret_val) {
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ e_err("Reset blocked by ME\n");
+ goto out;
+ }
+
+ /* Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000E_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ e_err("ME blocked access to PHY after reset\n");
+ }
+
+out:
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ usleep_range(10000, 20000);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.set_page = e1000_set_page_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_hv;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
+ phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.write_reg = e1000_write_phy_reg_hv;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
+ phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ phy->id = e1000_phy_unknown;
+
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->id == e1000_phy_unknown)
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ /* In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+ phy->type = e1000e_get_phy_type_from_id(phy->id);
+
+ switch (phy->type) {
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ case e1000_phy_i217:
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.commit = e1000e_phy_sw_reset;
+ break;
+ case e1000_phy_82578:
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000e_get_cable_length_m88;
+ phy->ops.get_info = e1000e_get_phy_info_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 i = 0;
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+
+ /* We may need to do this twice - once for IGP and if that fails,
+ * we'll set BM func pointers and try again
+ */
+ ret_val = e1000e_determine_phy_address(hw);
+ if (ret_val) {
+ phy->ops.write_reg = e1000e_write_phy_reg_bm;
+ phy->ops.read_reg = e1000e_read_phy_reg_bm;
+ ret_val = e1000e_determine_phy_address(hw);
+ if (ret_val) {
+ e_dbg("Cannot determine PHY addr. Erroring out\n");
+ return ret_val;
+ }
+ }
+
+ phy->id = 0;
+ while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
+ (i++ < 100)) {
+ usleep_range(1000, 2000);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (phy->id) {
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
+ phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
+ phy->ops.get_info = e1000e_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy->type = e1000_phy_ife;
+ phy->autoneg_mask = E1000_ALL_NOT_GIG;
+ phy->ops.get_info = e1000_get_phy_info_ife;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ phy->type = e1000_phy_bm;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg = e1000e_read_phy_reg_bm;
+ phy->ops.write_reg = e1000e_write_phy_reg_bm;
+ phy->ops.commit = e1000e_phy_sw_reset;
+ phy->ops.get_info = e1000e_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific NVM parameters and function
+ * pointers.
+ **/
+static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 gfpreg, sector_base_addr, sector_end_addr;
+ u16 i;
+ u32 nvm_size;
+
+ nvm->type = e1000_nvm_flash_sw;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+ * STRAP register. This is because in SPT the GbE Flash region
+ * is no longer accessed through the flash registers. Instead,
+ * the mechanism has changed, and the Flash region access
+ * registers are now implemented in GbE memory space.
+ */
+ nvm->flash_base_addr = 0;
+ nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
+ * NVM_SIZE_MULTIPLIER;
+ nvm->flash_bank_size = nvm_size / 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ /* Set the base address for flash register access */
+ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+ } else {
+ /* Can't read flash registers if register set isn't mapped. */
+ if (!hw->flash_address) {
+ e_dbg("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr
+ << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ }
+
+ nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
+
+ /* Clear shadow ram */
+ for (i = 0; i < nvm->word_size; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific MAC parameters and function
+ * pointers.
+ **/
+static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ /* Set media type function pointer */
+ hw->phy.media_type = e1000_media_type_copper;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 32;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+ if (mac->type == e1000_ich8lan)
+ mac->rar_entry_count--;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* LED and other operations */
+ switch (mac->type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000e_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000e_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000e_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_ich8lan;
+ mac->ops.led_off = e1000_led_off_ich8lan;
+ break;
+ case e1000_pch2lan:
+ mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch2lan;
+ /* fall-through */
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pchlan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_pchlan;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_pchlan;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_pchlan;
+ mac->ops.led_off = e1000_led_off_pchlan;
+ break;
+ default:
+ break;
+ }
+
+ if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
+ mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch_lpt;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_pch_lpt;
+ mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
+ }
+
+ /* Enable PCS Lock-loss workaround for ICH8 */
+ if (mac->type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+
+ return 0;
+}
+
+/**
+ * __e1000_access_emi_reg_locked - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ *
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
+ else
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, data, true);
+}
+
+/**
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be written to the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false);
+}
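+
+/* Illustrative sketch, not driver code: the _locked EMI helpers assume the
+ * caller already holds the SW/FW/HW semaphore, so a standalone read is
+ * bracketed by acquire/release.
+ */
+static __maybe_unused s32 example_read_emi(struct e1000_hw *hw, u16 addr,
+ u16 *data)
+{
+ s32 ret_val = hw->phy.ops.acquire(hw);
+
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_emi_reg_locked(hw, addr, data);
+ hw->phy.ops.release(hw);
+ return ret_val;
+}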
+
+/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on the setting in the dev_spec structure, the
+ * duplex of the link, and the EEE capabilities of the link partner. The
+ * LPI Control register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until then, otherwise there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
+ **/
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ s32 ret_val;
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
+ return 0;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ if (ret_val)
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user */
+ if (!dev_spec->eee_disable) {
+ /* Save off link partner's EEE ability */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa,
+ &dev_spec->eee_lp_ability);
+ if (ret_val)
+ goto release;
+
+ /* Read EEE advertisement */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+ if (ret_val)
+ goto release;
+
+ /* Enable EEE only for speeds in which the link partner is
+ * EEE capable and for which we advertise EEE.
+ */
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ e1e_rphy_locked(hw, MII_LPA, &data);
+ if (data & LPA_100FULL)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /* EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
+ }
+
+ if (hw->phy.type == e1000_phy_82579) {
+ ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ &data);
+ if (ret_val)
+ goto release;
+
+ data &= ~I82579_LPI_100_PLL_SHUT;
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ data);
+ }
+
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
+
+ ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ * preventing further DMA write requests. Workaround the issue by disabling
+ * the de-assertion of the clock request when in 1Gbps mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
+ **/
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+ u32 fextnvm6 = er32(FEXTNVM6);
+ u32 status = er32(STATUS);
+ s32 ret_val = 0;
+ u16 reg;
+
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &reg);
+ if (ret_val)
+ goto release;
+
+ ret_val =
+ e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ reg &
+ ~E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto release;
+
+ usleep_range(10, 20);
+
+ ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+ ret_val =
+ e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ reg);
+release:
+ hw->phy.ops.release(hw);
+ } else {
+ /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if ((hw->phy.revision > 5) || !link ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ ew32(FEXTNVM6, fextnvm6);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_platform_pm_pch_lpt - Set platform power management values
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
+ * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
+ * when link is up (which must not exceed the maximum latency supported
+ * by the platform), otherwise specify there is no LTR requirement.
+ * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
+ * latencies in the LTR Extended Capability Structure in the PCIe Extended
+ * Capability register set, on this device LTR is set by writing the
+ * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
+ * setting the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
+ * message to the PMC.
+ **/
+static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+{
+ u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+ link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+ u16 lat_enc = 0; /* latency encoded */
+
+ if (link) {
+ u16 speed, duplex, scale = 0;
+ u16 max_snoop, max_nosnoop;
+ u16 max_ltr_enc; /* max LTR latency encoded */
+ s64 lat_ns; /* latency (ns) */
+ s64 value;
+ u32 rxa;
+
+ if (!hw->adapter->max_frame_size) {
+ e_dbg("max_frame_size not set.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ if (!speed) {
+ e_dbg("Speed not set.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Rx Packet Buffer Allocation size (KB) */
+ rxa = er32(PBA) & E1000_PBA_RXA_MASK;
+
+ /* Determine the maximum latency tolerated by the device.
+ *
+ * Per the PCIe spec, the tolerated latencies are encoded as
+ * a 3-bit encoded scale (only 0-5 are valid) multiplied by
+ * a 10-bit value (0-1023) to provide a range from 1 ns to
+ * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
+ * 1=2^5ns, 2=2^10ns,...5=2^25ns.
+ */
+ lat_ns = ((s64)rxa * 1024 -
+ (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
+ if (lat_ns < 0)
+ lat_ns = 0;
+ else
+ do_div(lat_ns, speed);
+
+ value = lat_ns;
+ while (value > PCI_LTR_VALUE_MASK) {
+ scale++;
+ value = DIV_ROUND_UP(value, (1 << 5));
+ }
+ if (scale > E1000_LTRV_SCALE_MAX) {
+ e_dbg("Invalid LTR latency scale %d\n", scale);
+ return -E1000_ERR_CONFIG;
+ }
+ lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
+
+ /* Determine the maximum latency tolerated by the platform */
+ pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
+ &max_snoop);
+ pci_read_config_word(hw->adapter->pdev,
+ E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
+ max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
+
+ if (lat_enc > max_ltr_enc)
+ lat_enc = max_ltr_enc;
+ }
+
+ /* Set Snoop and No-Snoop latencies the same */
+ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
+ ew32(LTRV, reg);
+
+ return 0;
+}
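+
+/* Illustrative sketch, not driver code: the value/scale encoding computed
+ * above. For example, with rxa = 24 KB, max_frame_size = 1522 and a speed
+ * of 1000 Mbps, lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000 =
+ * 172256 ns, which encodes as scale 2 (2^10 ns units) and value 169.
+ */
+static __maybe_unused u16 example_ltr_encode(u64 lat_ns)
+{
+ u16 scale = 0;
+
+ while (lat_ns > PCI_LTR_VALUE_MASK) {
+ scale++;
+ lat_ns = DIV_ROUND_UP(lat_ns, 1ULL << 5);
+ }
+ return (u16)((scale << PCI_LTR_SCALE_SHIFT) | lat_ns);
+}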
+
+/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+
+ goto out;
+ }
+
+ if (!to_sx) {
+ int i = 0;
+
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (er32(STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+
+ if (i++ == 100)
+ break;
+
+ msleep(50);
+ }
+ e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (er32(FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+ */
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ phy_reg);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (er32(WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val)
+ e_dbg("Error in ULP enable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
+
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, the system has transitioned from
+ * Sx, or the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=false); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=true
+ * to forcibly disable ULP.
+ */
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ }
+
+ /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 10) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ usleep_range(10000, 20000);
+ }
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ ew32(H2ME, mac_reg);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ msleep(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-Enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ }
+out:
+ if (ret_val)
+ e_dbg("Error in ULP disable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val, tipg_reg = 0;
+ u16 emi_addr, emi_val = 0;
+ bool link;
+ u16 phy_reg;
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return 0;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_k1_gig_workaround_hv(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When connected at 10Mbps half-duplex, some parts are excessively
+ * aggressive resulting in many collisions. To avoid this, increase
+ * the IPG and reduce Rx latency in the PHY.
+ */
+ if (((hw->mac.type == e1000_pch2lan) ||
+ (hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) && link) {
+ u32 reg;
+
+ reg = er32(STATUS);
+ tipg_reg = er32(TIPG);
+ tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+
+ if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ tipg_reg |= 0xFF;
+ /* Reduce Rx latency in analog PHY */
+ emi_val = 0;
+ } else {
+ /* Roll back the default values */
+ tipg_reg |= 0x08;
+ emi_val = 1;
+ }
+
+ ew32(TIPG, tipg_reg);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
+
+ hw->phy.ops.release(hw);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Work-around I218 hang issue */
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ /* Set platform power management values for
+ * Latency Tolerance Reporting (LTR)
+ */
+ ret_val = e1000_platform_pm_pch_lpt(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Clear link partner's EEE ability */
+ hw->dev_spec.ich8lan.eee_lp_ability = 0;
+
+ /* FEXTNVM6 K1-off workaround */
+ if (hw->mac.type == e1000_pch_spt) {
+ u32 pcieanacfg = er32(PCIEANACFG);
+ u32 fextnvm6 = er32(FEXTNVM6);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+ ew32(FEXTNVM6, fextnvm6);
+ }
+
+ if (!link)
+ return 0; /* No link detected */
+
+ mac->get_link_status = false;
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ return ret_val;
+ /* fall-through */
+ case e1000_pchlan:
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround for PCHx parts in half-duplex:
+ * Set the number of preambles removed from the packet
+ * when it is passed from the PHY to the MAC to prevent
+ * the MAC from misinterpreting the packet type.
+ */
+ e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+ if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
+ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+ e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Check if there was DownShift; this must be checked
+ * immediately after link-up
+ */
+ e1000e_check_downshift(hw);
+
+ /* Enable/Disable EEE after link up */
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val)
+ e_dbg("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 rc;
+
+ rc = e1000_init_mac_params_ich8lan(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_ich8lan(hw);
+ if (rc)
+ return rc;
+
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ rc = e1000_init_phy_params_ich8lan(hw);
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ rc = e1000_init_phy_params_pchlan(hw);
+ break;
+ default:
+ break;
+ }
+ if (rc)
+ return rc;
+
+ /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+ * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
+ */
+ if ((adapter->hw.phy.type == e1000_phy_ife) ||
+ ((adapter->hw.mac.type >= e1000_pch2lan) &&
+ (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
+ adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ hw->mac.ops.blink_led = NULL;
+ }
+
+ if ((adapter->hw.mac.type == e1000_ich8lan) &&
+ (adapter->hw.phy.type != e1000_phy_ife))
+ adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+
+ /* Enable workaround for 82579 w/ ME enabled */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
+
+ return 0;
+}
+
+static DEFINE_MUTEX(nvm_mutex);
+
+/**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
+{
+ mutex_lock(&nvm_mutex);
+
+ return 0;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
+{
+ mutex_unlock(&nvm_mutex);
+}
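+
+/* Editor's note -- usage sketch (illustrative, not driver code): the two
+ * helpers above are installed as the nvm.ops.acquire/release callbacks,
+ * so every NVM access in this file is bracketed like this:
+ *
+ *	nvm->ops.acquire(hw);		// takes nvm_mutex
+ *	... read/write shadow RAM or flash ...
+ *	nvm->ops.release(hw);		// drops nvm_mutex
+ *
+ * e1000_read_nvm_ich8lan() below follows exactly this pattern.
+ */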
+
+/**
+ * e1000_acquire_swflag_ich8lan - Acquire software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = 0;
+
+ if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
+ &hw->adapter->state)) {
+ e_dbg("contention for Phy access\n");
+ return -E1000_ERR_PHY;
+ }
+
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+ break;
+
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("SW has already locked the resource.\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ timeout = SW_FLAG_TIMEOUT;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+ er32(FWSM), extcnf_ctrl);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+out:
+ if (ret_val)
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_swflag_ich8lan - Release software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ } else {
+ e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
+ }
+
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+}
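+
+/* Editor's note -- usage sketch (illustrative, not driver code): PHY and
+ * select MAC CSR accesses are serialized against FW/HW with this flag:
+ *
+ *	ret_val = e1000_acquire_swflag_ich8lan(hw);
+ *	if (ret_val)
+ *		return ret_val;
+ *	... access SHRAL/SHRAH or locked PHY registers ...
+ *	e1000_release_swflag_ich8lan(hw);
+ *
+ * e1000_rar_set_pch2lan() below uses this pairing around its SHRA writes.
+ */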
+
+/**
+ * e1000_check_mng_mode_ich8lan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has any manageability enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_rar_set_pch2lan - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr. For 82579, RAR[0] is the base address register that is to
+ * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
+ * Use SHRA[0-3] in place of those reserved for ME.
+ **/
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
+ return 0;
+ }
+
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32)(hw->mac.rar_entry_count)) {
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ if (ret_val)
+ goto out;
+
+ ew32(SHRAL(index - 1), rar_low);
+ e1e_flush();
+ ew32(SHRAH(index - 1), rar_high);
+ e1e_flush();
+
+ e1000_release_swflag_ich8lan(hw);
+
+ /* verify the register updates */
+ if ((er32(SHRAL(index - 1)) == rar_low) &&
+ (er32(SHRAH(index - 1)) == rar_high))
+ return 0;
+
+ e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
+ (index - 1), er32(FWSM));
+ }
+
+out:
+ e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ * @hw: pointer to the HW structure
+ *
+ * Get the number of available receive registers that the Host can
+ * program. SHRA[0-10] are the shared receive address registers
+ * that are shared between the Host and manageability engine (ME).
+ * ME can reserve any number of addresses and the host needs to be
+ * able to tell how many available registers it has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+ u32 wlock_mac;
+ u32 num_entries;
+
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ switch (wlock_mac) {
+ case 0:
+ /* All SHRA[0..10] and RAR[0] available */
+ num_entries = hw->mac.rar_entry_count;
+ break;
+ case 1:
+ /* Only RAR[0] available */
+ num_entries = 1;
+ break;
+ default:
+ /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+ num_entries = wlock_mac + 1;
+ break;
+ }
+
+ return num_entries;
+}
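+
+/* Editor's note -- worked example (assumed FWSM value, not from the
+ * source): if the WLOCK_MAC field reads 3, SHRA[0..2] plus RAR[0] are
+ * host-usable and the function returns 3 + 1 = 4; a value of 0 returns
+ * the full rar_entry_count, and a value of 1 leaves the host only RAR[0].
+ */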
+
+/**
+ * e1000_rar_set_pch_lpt - Set receive address registers
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address register array at index to the address passed
+ * in by addr. For LPT, RAR[0] is the base address register that is to
+ * contain the MAC address. SHRA[0-10] are the shared receive address
+ * registers that are shared between the Host and manageability engine (ME).
+ **/
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+ u32 wlock_mac;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
+ return 0;
+ }
+
+ /* The manageability engine (ME) can lock certain SHRAR registers that
+ * it is using - those registers are unavailable for use.
+ */
+ if (index < hw->mac.rar_entry_count) {
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ /* Check if all SHRAR registers are locked */
+ if (wlock_mac == 1)
+ goto out;
+
+ if ((wlock_mac == 0) || (index <= wlock_mac)) {
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+ if (ret_val)
+ goto out;
+
+ ew32(SHRAL_PCH_LPT(index - 1), rar_low);
+ e1e_flush();
+ ew32(SHRAH_PCH_LPT(index - 1), rar_high);
+ e1e_flush();
+
+ e1000_release_swflag_ich8lan(hw);
+
+ /* verify the register updates */
+ if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+ (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
+ return 0;
+ }
+ }
+
+out:
+ e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Checks if firmware is blocking the reset of the PHY.
+ * This is a function pointer entry point only called by
+ * reset routines.
+ **/
+static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+ bool blocked = false;
+ int i = 0;
+
+ while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
+ (i++ < 10))
+ usleep_range(10000, 20000);
+ return blocked ? E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = er32(STRAP);
+ u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+ E1000_STRAP_SMT_FREQ_SHIFT;
+ s32 ret_val;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ /* Restore SMBus frequency */
+ if (freq--) {
+ phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
+ phy_data |= (freq & (1 << 0)) <<
+ HV_SMB_ADDR_FREQ_LOW_SHIFT;
+ phy_data |= (freq & (1 << 1)) <<
+ (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
+ } else {
+ e_dbg("Unsupported SMB frequency in PHY\n");
+ }
+ }
+
+ return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+}
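+
+/* Editor's note -- worked example (assumed strap value): the NVM strap
+ * frequency field is stored minus one in the PHY. With SMT_FREQ == 2,
+ * freq-- yields 1, so bit 0 (set) lands at HV_SMB_ADDR_FREQ_LOW_SHIFT
+ * and bit 1 (clear) at HV_SMB_ADDR_FREQ_HIGH_SHIFT, i.e. HV_SMB_ADDR
+ * encodes frequency value 0b01.
+ */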
+
+/**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+ s32 ret_val = 0;
+ u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+ /* Initialize the PHY from the NVM on ICH platforms. This
+ * is needed due to an issue where the NVM configuration is
+ * not properly autoloaded after power transitions.
+ * Therefore, after each PHY reset, we will load the
+ * configuration data out of the NVM manually.
+ */
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* fall-thru */
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ data = er32(FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto release;
+
+ /* Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = er32(EXTCNF_CTRL);
+ if ((hw->mac.type < e1000_pch2lan) &&
+ (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
+ goto release;
+
+ cnf_size = er32(EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto release;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+ if (((hw->mac.type == e1000_pchlan) &&
+ !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
+ (hw->mac.type > e1000_pchlan)) {
+ /* HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
+ */
+ ret_val = e1000_write_smbus_addr(hw);
+ if (ret_val)
+ goto release;
+
+ data = er32(LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Configure LCD from extended configuration region. */
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
+ if (ret_val)
+ goto release;
+
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto release;
+
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
+
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
+
+ ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
+ if (ret_val)
+ goto release;
+ }
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
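+
+/* Editor's note -- layout sketch (inferred from the loop above, not from
+ * a datasheet): the extended configuration region is a list of word
+ * pairs,
+ *
+ *	word N     : reg_data (value to write)
+ *	word N + 1 : reg_addr (PHY register, or IGP01E1000_PHY_PAGE_SELECT)
+ *
+ * A pair addressing the page-select register only updates phy_page;
+ * every other pair is written to (phy_page | reg_addr) via
+ * e1e_wphy_locked().
+ */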
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+ if (hw->mac.type != e1000_pchlan)
+ return 0;
+
+ /* Wrap the whole flow with the sw flag */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+ if (link) {
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (status_reg == (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
+
+ if (status_reg == (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ /* Link stall fix for link up */
+ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
+ if (ret_val)
+ goto release;
+
+ } else {
+ /* Link stall fix for link down */
+ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @k1_enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+ s32 ret_val;
+ u32 ctrl_reg = 0;
+ u32 ctrl_ext = 0;
+ u32 reg = 0;
+ u16 kmrn_reg = 0;
+
+ ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (k1_enable)
+ kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+ else
+ kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+ ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ usleep_range(20, 40);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_reg = er32(CTRL);
+
+ reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ reg |= E1000_CTRL_FRCSPD;
+ ew32(CTRL, reg);
+
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ e1e_flush();
+ usleep_range(20, 40);
+ ew32(CTRL, ctrl_reg);
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+ usleep_range(20, 40);
+
+ return 0;
+}
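+
+/* Editor's note -- usage sketch (hypothetical caller, not driver code):
+ *
+ *	ret_val = e1000_configure_k1_ich8lan(hw, false);	// force K1 off
+ *	if (ret_val)
+ *		return ret_val;
+ *
+ * Callers such as e1000_k1_gig_workaround_hv() above take the PHY
+ * semaphore first, since the KMRN accesses here are the _locked variants.
+ */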
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: true when entering D0, false when entering D3
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
+ * in NVM determine whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 oem_reg;
+
+ if (hw->mac.type < e1000_pchlan)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pchlan) {
+ mac_reg = er32(EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto release;
+ }
+
+ mac_reg = er32(FEXTNVM);
+ if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+ goto release;
+
+ mac_reg = er32(PHY_CTRL);
+
+ ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto release;
+
+ oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+ if (d0_state) {
+ if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ } else {
+ if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU))
+ oem_reg |= HV_OEM_BITS_LPLU;
+ }
+
+ /* Set Restart auto-neg to activate the bits */
+ if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
+ !hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
+ ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
+
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds after PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
+ **/
+static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 phy_data;
+
+ if (hw->mac.type != e1000_pchlan)
+ return 0;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (((hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+ ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+ /* Disable generation of early preamble */
+ ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ return ret_val;
+
+ /* Preamble tuning for SSC */
+ ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type == e1000_phy_82578) {
+ /* Return registers to default by doing a soft reset then
+ * writing 0x3140 to the control register.
+ */
+ if (hw->phy.revision < 2) {
+ e1000e_phy_sw_reset(hw);
+ ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
+ }
+ }
+
+ /* Select page 0 */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy.addr = 1;
+ ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure the K1 Si workaround during phy reset assuming there is
+ * link so that it disables K1 if link is at 1Gbps.
+ */
+ ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ return ret_val;
+
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
+ if (ret_val)
+ goto release;
+
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i, phy_reg = 0;
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
+ mac_reg = er32(RAL(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+ (u16)((mac_reg >> 16) & 0xFFFF));
+
+ mac_reg = er32(RAH(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+ (u16)((mac_reg & E1000_RAH_AV)
+ >> 16));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = 0;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ if (hw->mac.type < e1000_pch2lan)
+ return 0;
+
+ /* disable Rx path while enabling/disabling workaround */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ if (ret_val)
+ return ret_val;
+
+ if (enable) {
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ u32 addr_high, addr_low;
+
+ addr_high = er32(RAH(i));
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = er32(RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+ ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Enable jumbo frame workaround in the PHY */
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ return ret_val;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ return ret_val;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (E1000_TX_PTR_GAP << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
+ if (ret_val)
+ return ret_val;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Write PHY register values back to h/w defaults */
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ return ret_val;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data |= (1 << 13);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ return ret_val;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ return ret_val;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+}
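+
+/* Editor's note -- usage sketch (hypothetical caller, not driver code):
+ * the workaround is toggled when the MTU crosses the jumbo threshold,
+ * e.g.:
+ *
+ *	ret_val = e1000_lv_jumbo_workaround_ich8lan(hw,
+ *						    new_mtu > ETH_DATA_LEN);
+ *	if (ret_val)
+ *		return ret_val;
+ *
+ * Note that while enabled, the workaround also makes the MAC strip the
+ * Ethernet CRC (E1000_RCTL_SECRC).
+ */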
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - apply PHY workarounds after PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return 0;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
+ if (ret_val)
+ goto release;
+ /* drop link after 5 times MSE threshold was reached */
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
+ * Disable K1 in 1000Mbps and 100Mbps.
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return 0;
+
+ /* Set K1 beacon duration based on 10Mbps speed */
+ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ if (status_reg &
+ (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
+ u16 pm_phy_reg;
+
+ /* LV 1G/100 packet drop issue workaround */
+ ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
+ if (ret_val)
+ return ret_val;
+ pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
+ if (ret_val)
+ return ret_val;
+ } else {
+ u32 mac_reg;
+
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+ ew32(FEXTNVM4, mac_reg);
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ if (hw->mac.type < e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication that the MAC has finished configuring the
+ * PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+ /* Wait for basic configuration to complete before proceeding */
+ do {
+ data = er32(STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ usleep_range(100, 200);
+ } while ((!data) && --loop);
+
+ /* If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state possibly resulting in no link.
+ */
+ if (loop == 0)
+ e_dbg("LAN_INIT_DONE not set, increase timeout\n");
+
+ /* Clear the Init Done bit for the next init event */
+ data = er32(STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ ew32(STATUS, data);
+}
+
+/**
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 reg;
+
+ if (hw->phy.ops.check_reset_block(hw))
+ return 0;
+
+ /* Allow time for h/w to get to quiescent state after reset */
+ usleep_range(10000, 20000);
+
+ /* Perform any necessary post-reset workarounds */
+ switch (hw->mac.type) {
+ case e1000_pchlan:
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the host wakeup bit after lcd reset */
+ if (hw->mac.type >= e1000_pchlan) {
+ e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+ reg &= ~BM_WUC_HOST_WU_BIT;
+ e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+ }
+
+ /* Configure the LCD with the extended configuration region in NVM */
+ ret_val = e1000_sw_lcd_config_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure the LCD with the OEM bits in NVM */
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+ if (hw->mac.type == e1000_pch2lan) {
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ usleep_range(10000, 20000);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ /* Set EEE LPI Update Timer to 200usec */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_emi_reg_locked(hw,
+ I82579_LPI_UPDATE_TIMER,
+ 0x1387);
+ hw->phy.ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_post_phy_reset_ich8lan(hw);
+}
+
+/**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val;
+ u16 oem_reg;
+
+ ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (active)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ else
+ oem_reg &= ~HV_OEM_BITS_LPLU;
+
+ if (!hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
+ return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+}
+
+/**
+ * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = 0;
+ u16 data;
+
+ if (phy->type == e1000_phy_ife)
+ return 0;
+
+ phy_ctrl = er32(PHY_CTRL);
+
+ if (active) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /* Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D3 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = 0;
+ u16 data;
+
+ phy_ctrl = er32(PHY_CTRL);
+
+ if (!active) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /* Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
+ * @hw: pointer to the HW structure
+ * @bank: pointer to the variable that returns the active bank
+ *
+ * Reads signature byte from the NVM using the flash access registers.
+ * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+ u32 eecd;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
+ u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+ u8 sig_byte = 0;
+ s32 ret_val;
+
+ switch (hw->mac.type) {
+ /* In SPT, read from the CTRL_EXT reg instead of
+ * accessing the sector valid bits from the nvm
+ */
+ case e1000_pch_spt:
+ *bank = er32(CTRL_EXT) & E1000_CTRL_EXT_NVMVS;
+ if ((*bank == 0) || (*bank == 1)) {
+ e_dbg("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ } else {
+ *bank = *bank - 2;
+ return 0;
+ }
+ break;
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ eecd = er32(EECD);
+ if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
+ E1000_EECD_SEC1VAL_VALID_MASK) {
+ if (eecd & E1000_EECD_SEC1VAL)
+ *bank = 1;
+ else
+ *bank = 0;
+
+ return 0;
+ }
+ e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
+ /* fall-thru */
+ default:
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return 0;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
+ bank1_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return 0;
+ }
+
+ e_dbg("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ }
+}
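+
+/* Editor's note -- worked example (assuming the usual definitions
+ * E1000_ICH_NVM_VALID_SIG_MASK == 0xC0 and E1000_ICH_NVM_SIG_VALUE ==
+ * 0x80): the byte read above is the high byte of word 0x13, so a value
+ * of 0x80..0xBF (word bits 15:14 == 10b) marks the bank valid, while
+ * 0xC0..0xFF (11b, freshly erased) does not.
+ */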
+
+/**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u32 dword = 0;
+ u16 offset_to_read;
+ u16 i;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = 0;
+
+ for (i = 0; i < words; i += 2) {
+ if (words - i == 1) {
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] =
+ dev_spec->shadow_ram[offset + i].value;
+ } else {
+ offset_to_read = act_offset + i -
+ ((act_offset + i) % 2);
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ if ((act_offset + i) % 2 == 0)
+ data[i] = (u16)(dword & 0xFFFF);
+ else
+ data[i] = (u16)((dword >> 16) & 0xFFFF);
+ }
+ } else {
+ offset_to_read = act_offset + i;
+ if (!(dev_spec->shadow_ram[offset + i].modified) ||
+ !(dev_spec->shadow_ram[offset + i + 1].modified)) {
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ }
+ if (dev_spec->shadow_ram[offset + i].modified)
+ data[i] =
+ dev_spec->shadow_ram[offset + i].value;
+ else
+ data[i] = (u16)(dword & 0xFFFF);
+ if (dev_spec->shadow_ram[offset + i].modified)
+ data[i + 1] =
+ dev_spec->shadow_ram[offset + i + 1].value;
+ else
+ data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ e_dbg("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words
+ * @data: Pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u16 i, word;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = 0;
+ for (i = 0; i < words; i++) {
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] = dev_spec->shadow_ram[offset + i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw,
+ act_offset + i,
+ &word);
+ if (ret_val)
+ break;
+ data[i] = word;
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ e_dbg("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_init_ich8lan - Initialize flash
+ * @hw: pointer to the HW structure
+ *
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ s32 ret_val = -E1000_ERR_NVM;
+
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ /* Check if the flash descriptor is valid */
+ if (!hsfsts.hsf_status.fldesvalid) {
+ e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Clear FCERR and DAEL in hw status by writing 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either the hardware provides a SPI cycle-in-progress bit
+ * to check against before starting a new cycle, or the FDONE
+ * bit is set to 1 by hardware reset, in which case it can be
+ * used to tell whether a cycle is in progress or has been
+ * completed.
+ */
+
+ if (!hsfsts.hsf_status.flcinprog) {
+ /* There is no cycle running at present,
+ * so we can start a cycle.
+ * Begin by setting Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ ret_val = 0;
+ } else {
+ s32 i;
+
+ /* Otherwise poll for sometime so the current
+ * cycle has a chance to end before giving up.
+ */
+ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (!hsfsts.hsf_status.flcinprog) {
+ ret_val = 0;
+ break;
+ }
+ udelay(1);
+ }
+ if (!ret_val) {
+ /* Successfully waited for the previous cycle to complete,
+ * now set the Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ } else {
+ e_dbg("Flash controller busy, cannot get access\n");
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ * @hw: pointer to the HW structure
+ * @timeout: maximum time to wait for completion
+ *
+ * This function starts a flash cycle and waits for its completion.
+ **/
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ u32 i = 0;
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone)
+ break;
+ udelay(1);
+ } while (i++ < timeout);
+
+ if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
+ return 0;
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+ return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
+ * e1000_read_flash_word_ich8lan - Read word from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash word at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ /* Must convert offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+}
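+
+/* Editor's note -- worked example: both helpers above take a word offset
+ * and shift it left by one to form the byte offset that the flash access
+ * registers expect, so the bank signature word 0x13 is read from byte
+ * address 0x26.
+ */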
+
+/**
+ * e1000_read_flash_byte_ich8lan - Read byte from flash
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to read.
+ * @data: Pointer to a byte to store the value read.
+ *
+ * Reads a single byte from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 *data)
+{
+ s32 ret_val;
+ u16 word = 0;
+
+ /* In SPT, only 32-bit access is supported,
+ * so this function should not be called.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ return -E1000_ERR_NVM;
+
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
+ if (ret_val)
+ return ret_val;
+
+ *data = (u8)word;
+
+ return 0;
+}
+
+/**
+ * e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte or word to read.
+ * @size: Size of data to read, 1=byte 2=word
+ * @data: Pointer to the word to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1; if so, clear it and try
+ * the whole sequence a few more times, else read in
+ * (shift in) the Flash Data0, least significant byte
+ * first
+ */
+ if (!ret_val) {
+ flash_data = er32flash(ICH_FLASH_FDATA0);
+ if (size == 1)
+ *data = (u8)(flash_data & 0x000000FF);
+ else if (size == 2)
+ *data = (u16)(flash_data & 0x0000FFFF);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a dword from the NVM using the flash access registers.
+ **/
+
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+ hw->mac.type != e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+ /* In SPT, this register is in LAN memory space, not flash.
+ * Therefore, only 32-bit access is supported.
+ */
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+
+ /* fldbcount of 3 (sizeof(u32) - 1) selects a 4 byte (dword) access */
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ /* In SPT, this register is in LAN memory space, not flash.
+ * Therefore, only 32-bit access is supported.
+ */
+ ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1; if so, clear it and try
+ * the whole sequence a few more times, else read the
+ * dword from Flash Data0
+ */
+ if (!ret_val) {
+ *data = er32flash(ICH_FLASH_FDATA0);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to write.
+ * @words: Size of data to write in words
+ * @data: Pointer to the word(s) to write at offset.
+ *
+ * Writes word(s) to the shadow RAM. The words are committed to the NVM
+ * when update_nvm_checksum is called.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 i;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ nvm->ops.acquire(hw);
+
+ for (i = 0; i < words; i++) {
+ dev_spec->shadow_ram[offset + i].modified = true;
+ dev_spec->shadow_ram[offset + i].value = data[i];
+ }
+
+ nvm->ops.release(hw);
+
+ return 0;
+}
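+
+/* Editor's note -- usage sketch (hypothetical caller, not driver code):
+ * a write only dirties the shadow RAM; nothing reaches the flash until
+ * the checksum update commits the whole bank:
+ *
+ *	u16 word = 0x1234;
+ *
+ *	ret_val = e1000_write_nvm_ich8lan(hw, offset, 1, &word);
+ *	if (!ret_val)
+ *		ret_val = hw->nvm.ops.update(hw);	// commit to flash
+ */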
+
+/**
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u32 dword = 0;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM
+ */
+ ret_val = e1000_read_flash_dword_ich8lan(hw,
+ i + old_bank_offset,
+ &dword);
+
+ if (dev_spec->shadow_ram[i].modified) {
+ dword &= 0xffff0000;
+ dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+ }
+ if (dev_spec->shadow_ram[i + 1].modified) {
+ dword &= 0x0000ffff;
+ dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+ << 16);
+ }
+ if (ret_val)
+ break;
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD - 1)
+ dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+ usleep_range(100, 200);
+
+ /* Write the data to the new bank. Offset in words */
+ act_offset = i + new_bank_offset;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+ dword);
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+ e_dbg("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13; this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+ /*offset in words but we read dword */
+ --act_offset;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0xBFFFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's; we can change 1's to 0's without an erase. The
+ * offset is in words, but we read a whole dword.
+ */
+ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0x00FFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
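+ /* 0xFFFF mirrors the erased-flash state, in which all bits read
+ * as 1's.
+ */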
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ usleep_range(10000, 20000);
+ }
+
+out:
+ if (ret_val)
+ e_dbg("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u16 data = 0;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ if (dev_spec->shadow_ram[i].modified) {
+ data = dev_spec->shadow_ram[i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw, i +
+ old_bank_offset,
+ &data);
+ if (ret_val)
+ break;
+ }
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ data |= E1000_ICH_NVM_SIG_MASK;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usleep_range(100, 200);
+ /* Write the bytes to the new bank. */
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset,
+ (u8)data);
+ if (ret_val)
+ break;
+
+ usleep_range(100, 200);
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+ e_dbg("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally, validate the new segment by setting bits 15:14
+ * to 10b in word 0x13. This can be done without an erase
+ * since these bits are 11b to start with; we only need to
+ * change bit 14 to 0b.
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+ ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+ if (ret_val)
+ goto release;
+
+ data &= 0xBFFF;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset * 2 + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's; we can change 1's to 0's without an erase.
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ usleep_range(10000, 20000);
+ }
+
+out:
+ if (ret_val)
+ e_dbg("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if the checksum needs to be fixed by reading bit 6 in word
+ * 0x19. If the bit is 0, the EEPROM has been modified but the checksum was
+ * not calculated, in which case we need to calculate the checksum and set
+ * bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+ u16 word;
+ u16 valid_csum_mask;
+
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+ * the checksum needs to be fixed. This bit is an indication that
+ * the NVM was prepared by OEM software and did not calculate
+ * the checksum...a likely scenario.
+ */
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ word = NVM_COMPAT;
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+ break;
+ default:
+ word = NVM_FUTURE_INIT_WORD1;
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+ break;
+ }
+
+ ret_val = e1000_read_nvm(hw, word, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & valid_csum_mask)) {
+ data |= valid_csum_mask;
+ ret_val = e1000_write_nvm(hw, word, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_update_nvm_checksum(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ * @hw: pointer to the HW structure
+ *
+ * To prevent malicious write/erase of the NVM, set it to be read-only
+ * so that the hardware ignores all write/erase cycles of the NVM via
+ * the flash control registers. The shadow-ram copy of the NVM will
+ * still be updated, however any updates to this copy will not stick
+ * across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ union ich8_flash_protected_range pr0;
+ union ich8_hws_flash_status hsfsts;
+ u32 gfpreg;
+
+ nvm->ops.acquire(hw);
+
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
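+ /* GFPREG packs the GbE region base in its low word and the
+ * region limit in its high word, each masked with
+ * FLASH_GFPREG_BASE_MASK.
+ */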
+
+ /* Write-protect GbE Sector of NVM */
+ pr0.regval = er32flash(ICH_FLASH_PR0);
+ pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+ pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+ pr0.range.wpe = true;
+ ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+ /* Lock down a subset of GbE Flash Control Registers, e.g.
+ * PR0 to prevent the write-protection from being lifted.
+ * Once FLOCKDN is set, the registers protected by it cannot
+ * be written until FLOCKDN is cleared by a hardware reset.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ hsfsts.hsf_status.flockdn = true;
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ nvm->ops.release(hw);
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to write.
+ * @size: Size of data to write, 1=byte 2=word (4=dword on SPT)
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one or two bytes (or a dword on SPT) to the NVM using the flash
+ * access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val;
+ u8 count = 0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ } else {
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+ /* In SPT, this register is in LAN memory space, not
+ * flash. Therefore, only 32 bit access is supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
+ /* fldbcount is encoded as (size - 1): 0b selects a 1-byte
+ * transfer and 1b a 2-byte transfer.
+ */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ /* In SPT, this register is in LAN memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ if (size == 1)
+ flash_data = (u32)data & 0x00FF;
+ else
+ flash_data = (u32)data;
+
+ ew32flash(ICH_FLASH_FDATA0, flash_data);
+
+ /* Check if FCERR is set to 1. If so, clear it and try the
+ * whole sequence a few more times; otherwise we are done.
+ */
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ if (!ret_val)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to write.
+ * @data: The 4 bytes to write to the NVM.
+ *
+ * Writes four bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val;
+ u8 count = 0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+
+ /* In SPT, this register is in LAN memory space, not
+ * flash. Therefore, only 32 bit access is supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
+ >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
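+ /* fldbcount = size - 1, so 3 selects a 4-byte (dword)
+ * transfer.
+ */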
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+ /* In SPT, this register is in LAN memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ew32flash(ICH_FLASH_FDATA0, data);
+
+ /* Check if FCERR is set to 1. If so, clear it and try the
+ * whole sequence a few more times; otherwise we are done.
+ */
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+ if (!ret_val)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The index of the byte to write.
+ * @data: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 data)
+{
+ u16 word = (u16)data;
+
+ return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The word offset of the dword to write.
+ * @dword: The dword to write to the NVM.
+ *
+ * Writes a single dword to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+ if (!ret_val)
+ return ret_val;
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
+ usleep_range(100, 200);
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+ if (!ret_val)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
+
+/**
+ * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to write.
+ * @byte: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (!ret_val)
+ return ret_val;
+
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
+ usleep_range(100, 200);
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (!ret_val)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
+
+/**
+ * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ * @hw: pointer to the HW structure
+ * @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ * Erases the specified bank. Each bank is a 4k block; banks are 0 based.
+ * Bank N starts at 4096 * N + flash_reg_addr.
+ **/
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ /* bank size is in 16bit words - adjust to bytes */
+ u32 flash_bank_size = nvm->flash_bank_size * 2;
+ s32 ret_val;
+ s32 count = 0;
+ s32 j, iteration, sector_size;
+
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of hw flash status
+ * register
+ * 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector
+ * can be calculated as = bank * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as = bank * 4096
+ * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+ * (ich9 only, otherwise error condition)
+ * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+ */
+ switch (hsfsts.hsf_status.berasesz) {
+ case 0:
+ /* Hw sector size 256 */
+ sector_size = ICH_FLASH_SEG_SIZE_256;
+ iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+ break;
+ case 1:
+ sector_size = ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ break;
+ case 2:
+ sector_size = ICH_FLASH_SEG_SIZE_8K;
+ iteration = 1;
+ break;
+ case 3:
+ sector_size = ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ break;
+ default:
+ return -E1000_ERR_NVM;
+ }
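+ /* Example: with 256-byte hw sectors and a 4 KB bank (2K words),
+ * iteration = 4096 / 256 = 16 erase cycles per bank.
+ */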
+
+ /* Start with the base address, then add the sector offset. */
+ flash_linear_addr = hw->nvm.flash_base_addr;
+ flash_linear_addr += (bank) ? flash_bank_size : 0;
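+ /* e.g. with 4 KB banks, bank 1 begins 4096 bytes into the region */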
+
+ for (j = 0; j < iteration; j++) {
+ do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Write a value 11 (block Erase) in Flash
+ * Cycle field in hw flash control
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval =
+ er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the
+ * block into the Flash Linear Address field in
+ * Flash Address. Compute the address from the loop
+ * index each time so that a retry of the same
+ * sector does not re-add the offset.
+ */
+ ew32flash(ICH_FLASH_FADDR,
+ flash_linear_addr + (j * sector_size));
+
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
+ if (!ret_val)
+ break;
+
+ /* Check if FCERR is set to 1. If so,
+ * clear it and try the whole sequence
+ * a few more times; otherwise we are done.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr)
+ /* repeat for some time before giving up */
+ continue;
+ else if (!hsfsts.hsf_status.flcdone)
+ return ret_val;
+ } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_valid_led_default_ich8lan - Set the default LED settings
+ * @hw: pointer to the HW structure
+ * @data: Pointer to the LED settings
+ *
+ * Reads the LED default settings from the NVM into data. If the NVM LED
+ * setting is all 0's or F's, set the LED default to a valid LED default
+ * setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT_ICH8LAN;
+
+ return 0;
+}
+
+/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register, rather it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode which
+ * complicates the ID feature. Instead of using the "on" mode to indicate
+ * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
+ * use "link_up" mode. The LEDs will still ID on request if there is no
+ * link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
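+ /* The NVM ID-LED word packs one 4-bit mode per LED (hence the
+ * i << 2 shift below), while the PHY LED configuration uses
+ * 5-bit fields per LED (hence the i * 5 shift).
+ */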
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ * Capability register, so the bus width is hard coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+
+ ret_val = e1000e_get_bus_info_pcie(hw);
+
+ /* ICH devices are "PCI Express"-ish. They have
+ * a configuration space, but do not contain
+ * PCI Express Capability registers, so bus width
+ * must be hardcoded.
+ */
+ if (bus->width == e1000_bus_width_unknown)
+ bus->width = e1000_bus_width_pcie_x1;
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 kum_cfg;
+ u32 ctrl, reg;
+ s32 ret_val;
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC
+ * with the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac.type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ ew32(PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ ew32(PBS, E1000_PBS_16K);
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ /* Save the NVM K1 bit setting */
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
+ if (ret_val)
+ return ret_val;
+
+ if (kum_cfg & E1000_NVM_K1_ENABLE)
+ dev_spec->nvm_k1_enabled = true;
+ else
+ dev_spec->nvm_k1_enabled = false;
+ }
+
+ ctrl = er32(CTRL);
+
+ if (!hw->phy.ops.check_reset_block(hw)) {
+ /* Full-chip reset requires MAC and PHY reset at the same
+ * time to make sure the interface between MAC and the
+ * external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+
+ /* Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+ }
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ e_dbg("Issuing a global reset to ich8lan\n");
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
+ msleep(20);
+
+ /* Set Phy Config Counter to 50msec */
+ if (hw->mac.type == e1000_pch2lan) {
+ reg = er32(FEXTNVM3);
+ reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ ew32(FEXTNVM3, reg);
+ }
+
+ if (!ret_val)
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+ if (ctrl & E1000_CTRL_PHY_RST) {
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ ew32(CRC_OFFSET, 0x65656565);
+
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ reg = er32(KABGTXD);
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, reg);
+
+ return 0;
+}
+
+/**
+ * e1000_init_hw_ich8lan - Initialize the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Prepares the hardware for transmit and receive by doing the following:
+ * - initialize hardware bits
+ * - initialize LED identification
+ * - setup receive address registers
+ * - setup flow control
+ * - setup transmit descriptors
+ * - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl_ext, txdctl, snoop;
+ s32 ret_val;
+ u16 i;
+
+ e1000_initialize_hw_bits_ich8lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+
+ /* Setup the receive address. */
+ e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Disable wakeup by clearing the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
+ i &= ~BM_WUC_HOST_WU_BIT;
+ e1e_wphy(hw, BM_PORT_GEN_CFG, i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy for both queues */
+ txdctl = er32(TXDCTL(0));
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
+ ew32(TXDCTL(0), txdctl);
+ txdctl = er32(TXDCTL(1));
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
+ ew32(TXDCTL(1), txdctl);
+
+ /* ICH8 has opposite polarity of no_snoop bits.
+ * By default, we should use snoop behavior.
+ */
+ if (mac->type == e1000_ich8lan)
+ snoop = PCIE_ICH8_SNOOP_ALL;
+ else
+ snoop = (u32)~(PCIE_NO_SNOOP_ALL);
+ e1000e_set_pcie_no_snoop(hw, snoop);
+
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_ich8lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ * @hw: pointer to the HW structure
+ *
+ * Sets/Clears required hardware bits necessary for correctly setting up the
+ * hardware for transmit and receive.
+ **/
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ /* Extended Device Control */
+ reg = er32(CTRL_EXT);
+ reg |= (1 << 22);
+ /* Enable PHY low-power state when MAC is at D3 w/o WoL */
+ if (hw->mac.type >= e1000_pchlan)
+ reg |= E1000_CTRL_EXT_PHYPDEN;
+ ew32(CTRL_EXT, reg);
+
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (1 << 28) | (1 << 29);
+ reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ ew32(TARC(1), reg);
+
+ /* Device Status */
+ if (hw->mac.type == e1000_ich8lan) {
+ reg = er32(STATUS);
+ reg &= ~(1 << 31);
+ ew32(STATUS, reg);
+ }
+
+ /* Work around a descriptor data corruption issue seen with NFS v2
+ * UDP traffic by simply disabling the NFS filtering capability.
+ */
+ reg = er32(RFCTL);
+ reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ ew32(RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ reg = er32(PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ ew32(PBECCSTS, reg);
+
+ reg = er32(CTRL);
+ reg |= E1000_CTRL_MEHE;
+ ew32(CTRL, reg);
+ }
+}
+
+/**
+ * e1000_setup_link_ich8lan - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ if (hw->phy.ops.check_reset_block(hw))
+ return 0;
+
+ /* ICH parts do not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ /* Workaround h/w hang when Tx flow control enabled */
+ if (hw->mac.type == e1000_pchlan)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+ }
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+ /* Continue to configure the copper link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ ew32(FCTTV, hw->fc.pause_time);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_i217) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ew32(FCRTV_PCH, hw->fc.refresh_time);
+
+ ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Configures the Kumeran interface to the PHY to wait the appropriate time
+ * when polling the PHY, then calls the generic setup_copper_link to finish
+ * configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each iteration
+ * and increase the max iterations when polling the phy;
+ * this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
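+ /* Setting the low six bits of the in-band parameter raises the
+ * PHY polling iteration limit (see the comment above).
+ */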
+ reg_data |= 0x3F;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->phy.type) {
+ case e1000_phy_igp_3:
+ ret_val = e1000e_copper_link_setup_igp(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ ret_val = e1000e_copper_link_setup_m88(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_ife:
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+ switch (hw->phy.mdix) {
+ case 1:
+ reg_data &= ~IFE_PMC_FORCE_MDIX;
+ break;
+ case 2:
+ reg_data |= IFE_PMC_FORCE_MDIX;
+ break;
+ case 0:
+ default:
+ reg_data |= IFE_PMC_AUTO_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices.
+ **/
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac.type == e1000_ich8lan) &&
+ (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+ ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change), when link is up
+ * and speed is gigabit:
+ * 0) if workaround is optionally disabled do nothing
+ * 1) wait 1ms for Kumeran link to come up
+ * 2) check Kumeran Diagnostic register PCS lock loss bit
+ * 3) if not set the link is locked (all is good), otherwise...
+ * 4) reset the PHY
+ * 5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+ u16 i, data;
+ bool link;
+
+ if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+ return 0;
+
+ /* Make sure link is up before proceeding. If not, just return.
+ * Attempting this while link is negotiating fouls up link
+ * stability.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (!link)
+ return 0;
+
+ for (i = 0; i < 10; i++) {
+ /* read once to clear */
+ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return 0;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
+ mdelay(5);
+ }
+ /* Disable GigE link negotiation */
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ ew32(PHY_CTRL, phy_ctrl);
+
+ /* Call gig speed drop workaround on Gig disable before accessing
+ * any PHY registers
+ */
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* unable to acquire PCS lock */
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true
+ * /disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+ if (hw->mac.type != e1000_ich8lan) {
+ e_dbg("Workaround applies to ICH8 only.\n");
+ return;
+ }
+
+ dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+ u16 data;
+ u8 retry = 0;
+
+ if (hw->phy.type != e1000_phy_igp_3)
+ return;
+
+ /* Try the workaround twice (if needed) */
+ do {
+ /* Disable link */
+ reg = er32(PHY_CTRL);
+ reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ ew32(PHY_CTRL, reg);
+
+ /* Call gig speed drop workaround on Gig disable before
+ * accessing any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* Write VR power-down enable */
+ e1e_rphy(hw, IGP3_VR_CTRL, &data);
+ data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+ /* Read it back and test */
+ e1e_rphy(hw, IGP3_VR_CTRL, &data);
+ data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = er32(CTRL);
+ ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+}
+
+/**
+ * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data;
+
+ if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
+ return;
+
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ &reg_data);
+ if (ret_val)
+ return;
+ reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+ if (ret_val)
+ return;
+ reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
+}
+
+/**
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'Gig Disable' to force link speed negotiation to a lower speed based on
+ * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
+ * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
+ * needs to be written.
+ * Parts that support (and are linked to a partner which supports) EEE in
+ * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ * than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+ if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ u32 fextnvm6 = er32(FEXTNVM6);
+
+ ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (!dev_spec->eee_disable) {
+ u16 eee_advert;
+
+ ret_val =
+ e1000_read_emi_reg_locked(hw,
+ I217_EEE_ADVERTISEMENT,
+ &eee_advert);
+ if (ret_val)
+ goto release;
+
+ /* Disable LPLU if both link partners support 100BaseT
+ * EEE and 100Full is advertised on both ends of the
+ * link, and enable Auto Enable LPI since there will
+ * be no driver to enable LPI while in Sx.
+ */
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
+ (dev_spec->eee_lp_ability &
+ I82579_EEE_100_SUPPORTED) &&
+ (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
+ phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU);
+
+ /* Set Auto Enable LPI after link up */
+ e1e_rphy_locked(hw,
+ I217_LPI_GPIO_CTRL, &phy_reg);
+ phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ e1e_wphy_locked(hw,
+ I217_LPI_GPIO_CTRL, phy_reg);
+ }
+ }
+
+ /* For i217 Intel Rapid Start Technology support,
+ * when the system is going into Sx and no manageability engine
+ * is present, the driver must configure proxy to reset only on
+ * power good. LPI (Low Power Idle) state must also reset only
+ * on power good, as well as the MTA (Multicast table array).
+ * The SMBus release must also be disabled on LCD reset.
+ */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Enable proxy to reset only on power good. */
+ e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
+ phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
+
+ /* Set bit enable LPI (EEE) to reset only on
+ * power good.
+ */
+ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
+ e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
+
+ /* Disable the SMB release on LCD reset. */
+ e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
+ e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+ }
+
+ /* Enable MTA to reset for Intel Rapid Start Technology
+ * Support
+ */
+ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
+ e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ }
+out:
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, false);
+
+ /* Reset PHY to activate OEM bits on 82577/8 */
+ if (hw->mac.type == e1000_pchlan)
+ e1000e_phy_hw_reset_generic(hw);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
+}
+
+/**
+ * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ * @hw: pointer to the HW structure
+ *
+ * During Sx to S0 transitions on non-managed devices or managed devices
+ * on which PHY resets are not blocked, if the PHY registers cannot be
+ * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
+ * the PHY.
+ * On i217, setup Intel Rapid Start Technology.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ if (hw->mac.type < e1000_pch2lan)
+ return;
+
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val) {
+ e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
+ return;
+ }
+
+ /* For i217 Intel Rapid Start Technology support when the system
+ * is transitioning from Sx and no manageability engine is present
+ * configure SMBus to restore on reset, disable proxy, and enable
+ * the reset on MTA (Multicast table array).
+ */
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_dbg("Failed to setup iRST\n");
+ return;
+ }
+
+ /* Clear Auto Enable LPI after link up */
+ e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+ phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Restore clear on SMB if no manageability engine
+ * is present
+ */
+ ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
+ e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+
+ /* Disable Proxy */
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
+ }
+ /* Enable reset on MTA */
+ ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
+ e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+release:
+ if (ret_val)
+ e_dbg("Error %d in resume workarounds\n", ret_val);
+ hw->phy.ops.release(hw);
+ }
+}
+
+/**
+ * e1000_cleanup_led_ich8lan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+ if (hw->phy.type == e1000_phy_ife)
+ return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+
+ ew32(LEDCTL, hw->mac.ledctl_default);
+ return 0;
+}
+
+/**
+ * e1000_led_on_ich8lan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+ if (hw->phy.type == e1000_phy_ife)
+ return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+ ew32(LEDCTL, hw->mac.ledctl_mode2);
+ return 0;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+ if (hw->phy.type == e1000_phy_ife)
+ return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE |
+ IFE_PSCL_PROBE_LEDS_OFF));
+
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ return 0;
+}
+
+/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode2;
+ u32 i, led;
+
+ /* If no link, then turn LED on by setting the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode2 (in
+ * that mode the LED would otherwise be off while link is
+ * down).
+ */
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode1;
+ u32 i, led;
+
+ /* If no link, then turn LED off by clearing the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode1.
+ */
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some EEPROM-less silicon will fail trying to read the config done
+ * bit, so only an error is logged and execution continues. If we were to
+ * return an error, EEPROM-less silicon would not be able to be reset or
+ * change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u32 status;
+
+ e1000e_get_cfg_done_generic(hw);
+
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /* When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ e_dbg("Auto Read Done did not complete\n");
+ ret_val = 0;
+ }
+ }
+
+ /* Clear PHY Reset Asserted bit */
+ status = er32(STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ e_dbg("PHY Reset Asserted not set - needs delay\n");
+
+ /* If EEPROM is not marked present, init the IGP 3 PHY manually */
+ if (hw->mac.type <= e1000_ich9lan) {
+ if (!(er32(EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3)) {
+ e1000e_phy_init_script_igp3(hw);
+ }
+ } else {
+ if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+ /* Maybe we should do a basic PHY config */
+ e_dbg("EEPROM not present\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears hardware counters specific to the silicon family and calls
+ * clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ s32 ret_val;
+
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ /* Clear PHY statistics registers */
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_i217) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
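+ /* These PHY counters are clear-on-read; read and discard each
+ * upper/lower half to zero them.
+ */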
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+ hw->phy.ops.release(hw);
+ }
+}
+
+static const struct e1000_mac_operations ich8_mac_ops = {
+ /* check_mng_mode dependent on mac type */
+ .check_for_link = e1000_check_for_copper_link_ich8lan,
+ /* cleanup_led dependent on mac type */
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
+ .get_bus_info = e1000_get_bus_info_ich8lan,
+ .set_lan_id = e1000_set_lan_id_single_port,
+ .get_link_up_info = e1000_get_link_up_info_ich8lan,
+ /* led_on dependent on mac type */
+ /* led_off dependent on mac type */
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .reset_hw = e1000_reset_hw_ich8lan,
+ .init_hw = e1000_init_hw_ich8lan,
+ .setup_link = e1000_setup_link_ich8lan,
+ .setup_physical_interface = e1000_setup_copper_link_ich8lan,
+ /* id_led_init dependent on mac type */
+ .config_collision_dist = e1000e_config_collision_dist_generic,
+ .rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
+};
+
+static const struct e1000_phy_operations ich8_phy_ops = {
+ .acquire = e1000_acquire_swflag_ich8lan,
+ .check_reset_block = e1000_check_reset_block_ich8lan,
+ .commit = NULL,
+ .get_cfg_done = e1000_get_cfg_done_ich8lan,
+ .get_cable_length = e1000e_get_cable_length_igp_2,
+ .read_reg = e1000e_read_phy_reg_igp,
+ .release = e1000_release_swflag_ich8lan,
+ .reset = e1000_phy_hw_reset_ich8lan,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
+ .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
+ .write_reg = e1000e_write_phy_reg_igp,
+};
+
+static const struct e1000_nvm_operations ich8_nvm_ops = {
+ .acquire = e1000_acquire_nvm_ich8lan,
+ .read = e1000_read_nvm_ich8lan,
+ .release = e1000_release_nvm_ich8lan,
+ .reload = e1000e_reload_nvm_generic,
+ .update = e1000_update_nvm_checksum_ich8lan,
+ .valid_led_default = e1000_valid_led_default_ich8lan,
+ .validate = e1000_validate_nvm_checksum_ich8lan,
+ .write = e1000_write_nvm_ich8lan,
+};
+
+static const struct e1000_nvm_operations spt_nvm_ops = {
+ .acquire = e1000_acquire_nvm_ich8lan,
+ .release = e1000_release_nvm_ich8lan,
+ .read = e1000_read_nvm_spt,
+ .update = e1000_update_nvm_checksum_spt,
+ .reload = e1000e_reload_nvm_generic,
+ .valid_led_default = e1000_valid_led_default_ich8lan,
+ .validate = e1000_validate_nvm_checksum_ich8lan,
+ .write = e1000_write_nvm_ich8lan,
+};
+
+const struct e1000_info e1000_ich8_info = {
+ .mac = e1000_ich8lan,
+ .flags = FLAG_HAS_WOL
+ | FLAG_IS_ICH
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_APME_IN_WUC,
+ .pba = 8,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich9_info = {
+ .mac = e1000_ich9lan,
+ .flags = FLAG_HAS_JUMBO_FRAMES
+ | FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_APME_IN_WUC,
+ .pba = 18,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich10_info = {
+ .mac = e1000_ich10lan,
+ .flags = FLAG_HAS_JUMBO_FRAMES
+ | FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_APME_IN_WUC,
+ .pba = 18,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_info = {
+ .mac = e1000_pchlan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS,
+ .pba = 26,
+ .max_hw_frame_size = 4096,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch2_info = {
+ .mac = e1000_pch2lan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9018,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_lpt_info = {
+ .mac = e1000_pch_lpt,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9018,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_spt_info = {
+ .mac = e1000_pch_spt,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9018,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h
new file mode 100644
index 000000000..770a573b9
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -0,0 +1,303 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_ICH8LAN_H_
+#define _E1000E_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
+
+/* Requires up to 10 seconds when the MNG firmware might be accessing the part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
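Each 4-bit nibble of this word selects the behavior of one LED; e1000e_id_led_init_generic in mac.c (near the end of this page) walks it with a 0x0F mask and 4-bit shifts. A minimal sketch of extracting LED i's mode, with led_mode() a hypothetical helper name:

	/* Hypothetical helper: pull the 4-bit mode for LED i (0..3) out of
	 * an ID_LED_* default word such as ID_LED_DEFAULT_ICH8LAN.
	 */
	static inline u16 led_mode(u16 id_led, int i)
	{
		return (id_led >> (i * 4)) & 0x0F;
	}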
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
+
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+
+#define K1_ENTRY_LATENCY 0
+#define K1_MIN_TIME 1
+#define NVM_SIZE_MULTIPLIER 4096 /* multiplier for NVMS field */
+#define E1000_FLASH_BASE_ADDR 0xE000 /* offset of NVM access regs */
+#define E1000_CTRL_EXT_NVMVS 0x3 /* NVM valid sector */
+
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
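PHY_REG() above packs a PHY page number and a register offset into a single address: the page occupies the bits above PHY_PAGE_SHIFT and the low bits keep the register offset, masked by MAX_PHY_REG_ADDRESS (0x1F elsewhere in this driver). A worked example under that assumption:

	/* PHY_REG(770, 17) = (770 << 5) | (17 & 0x1F) = 0x6051, which is the
	 * HV_PM_CTRL address defined later in this header.
	 */
	u32 addr = PHY_REG(770, 17);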
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1 PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_ENABLE 0x4000
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* Inband Control */
+#define I217_INBAND_CTRL PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
+
+/* Low Power Idle GPIO Control */
+#define I217_LPI_GPIO_CTRL PHY_REG(772, 18)
+#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
+#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG 0xB20C /* Receive configuration */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+/* Latency Tolerance Reporting */
+#define E1000_LTRV 0x000F8
+#define E1000_LTRV_SCALE_MAX 5
+#define E1000_LTRV_SCALE_FACTOR 5
+#define E1000_LTRV_REQ_SHIFT 15
+#define E1000_LTRV_NOSNOOP_SHIFT 16
+#define E1000_LTRV_SEND (1 << 30)
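The value/scale split above follows the PCIe LTR encoding, where the reported latency is value << (scale * 5) nanoseconds. A minimal sketch of packing a nanosecond latency into that format; ltr_encode() is a hypothetical helper, and the 10-bit value field and scale position are assumptions taken from the PCIe spec rather than from this driver:

	static u32 ltr_encode(u32 lat_ns)
	{
		u32 scale = 0;

		/* Shift the latency down until it fits the 10-bit value
		 * field; each step multiplies the unit by 2^5.
		 */
		while (lat_ns > 0x3FF && scale < E1000_LTRV_SCALE_MAX) {
			lat_ns >>= E1000_LTRV_SCALE_FACTOR;
			scale++;
		}
		return (scale << 10) | lat_ns;
	}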
+
+/* Proprietary Latency Tolerance Reporting PCI Capability */
+#define E1000_PCI_LTR_CAP_LPT 0xA8
+
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
+#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/mac.c b/kernel/drivers/net/ethernet/intel/e1000e/mac.c
new file mode 100644
index 000000000..30b74d590
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/mac.c
@@ -0,0 +1,1800 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000e_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ struct e1000_adapter *adapter = hw->adapter;
+ u16 pcie_link_status, cap_offset;
+
+ cap_offset = adapter->pdev->pcie_cap;
+ if (!cap_offset) {
+ bus->width = e1000_bus_width_unknown;
+ } else {
+ pci_read_config_word(adapter->pdev,
+ cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status);
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+ PCIE_LINK_WIDTH_MASK) >>
+ PCIE_LINK_WIDTH_SHIFT);
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /* The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+ e1e_flush();
+ }
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ e1e_flush();
+}
+
+/**
+ * e1000e_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: number of receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+
+ /* Setup the receive address */
+ e_dbg("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ return ret_val;
+
+ /* not supported on 82573 */
+ if (hw->mac.type == e1000_82573)
+ return 0;
+
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ return 0;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (is_multicast_ether_addr(alt_mac_addr)) {
+ e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ return 0;
+ }
+
+ /* We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+ return 0;
+}
+
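+/**
+ * e1000e_rar_get_count_generic - Get receive address register count
+ * @hw: pointer to the HW structure
+ *
+ * Reports the number of receive address registers supported by the MAC.
+ **/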
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+ return hw->mac.rar_entry_count;
+}
+
+/**
+ * e1000e_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
+
+ return 0;
+}
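To make the little-endian packing concrete, for a made-up address 00:1B:21:12:34:56:

	/* rar_low  = 0x00 | 0x1B<<8 | 0x21<<16 | 0x12<<24 = 0x12211B00
	 * rar_high = 0x34 | 0x56<<8                       = 0x00005634
	 * and E1000_RAH_AV is OR'd into rar_high since the address is
	 * non-zero.
	 */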
+
+/**
+ * e1000_hash_mc_addr - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16)mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
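A standalone check of the case-0 numbers quoted in the comment above (mta_reg_count = 128, so hash_mask = 0xFFF):

	/* mc_addr = 01:AA:00:12:34:56, mc_filter_type = 0:
	 * hash_mask = 128 * 32 - 1 = 0xFFF; 0xFFF >> 4 == 0xFF -> bit_shift = 4
	 * hash = 0xFFF & ((0x34 >> (8 - 4)) | (0x56 << 4))
	 *      = 0xFFF & (0x003 | 0x560) = 0x563, matching case 0.
	 */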
+
+/**
+ * e1000e_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32)i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+ e1e_flush();
+}
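The hash-to-table mapping above splits the 12-bit hash into a register index (the bits above 5) and a bit position (the low 5 bits). Continuing the 0x563 example:

	/* hash_reg = (0x563 >> 5) & (128 - 1) = 43
	 * hash_bit =  0x563 & 0x1F            = 3
	 * so mta_shadow[43] |= (1 << 3) for that multicast address.
	 */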
+
+/**
+ * e1000e_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+ er32(CRCERRS);
+ er32(SYMERRS);
+ er32(MPC);
+ er32(SCC);
+ er32(ECOL);
+ er32(MCC);
+ er32(LATECOL);
+ er32(COLC);
+ er32(DC);
+ er32(SEC);
+ er32(RLEC);
+ er32(XONRXC);
+ er32(XONTXC);
+ er32(XOFFRXC);
+ er32(XOFFTXC);
+ er32(FCRUC);
+ er32(GPRC);
+ er32(BPRC);
+ er32(MPRC);
+ er32(GPTC);
+ er32(GORCL);
+ er32(GORCH);
+ er32(GOTCL);
+ er32(GOTCH);
+ er32(RNBC);
+ er32(RUC);
+ er32(RFC);
+ er32(ROC);
+ er32(RJC);
+ er32(TORL);
+ er32(TORH);
+ er32(TOTL);
+ er32(TOTH);
+ er32(TPR);
+ er32(TPT);
+ er32(MPTC);
+ er32(BPTC);
+}
+
+/**
+ * e1000e_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return 0;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ return 0; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000e_check_downshift(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val)
+ e_dbg("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000e_check_for_fiber_link - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
+ !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return 0;
+ }
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_check_for_serdes_link - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return 0;
+ }
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+ /* If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ usleep_range(10, 20);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ e_dbg("SERDES: Link up - forced.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & er32(TXCW)) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ usleep_range(10, 20);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ e_dbg("SERDES: Link up - autoneg completed successfully.\n");
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+ return 0;
+}
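Spelled out, and assuming the usual e1000 bit assignments for word 0x0F (NVM_WORD0F_PAUSE_MASK = 0x3000 and NVM_WORD0F_ASM_DIR = 0x2000 in defines.h), the decode above reads:

	/* nvm_data & 0x3000 == 0x0000 -> e1000_fc_none     (no PAUSE bits set)
	 * nvm_data & 0x3000 == 0x2000 -> e1000_fc_tx_pause (ASM_DIR only)
	 * any other combination       -> e1000_fc_full     (PAUSE bit set)
	 */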
+
+/**
+ * e1000e_setup_link_generic - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 e1000e_setup_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ return 0;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ ew32(FCTTV, hw->fc.pause_time);
+
+ return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+
+ /* Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ew32(TXCW, txcw);
+ mac->txcw = txcw;
+
+ return 0;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val;
+
+ /* If we have a signal (the cable is plugged in, or assumed true for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+ * Status Register. Time-out if a link isn't seen in 500 milliseconds
+ * (Auto-negotiation should complete in less than 500 milliseconds even
+ * if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ usleep_range(10000, 20000);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ e_dbg("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = true;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ e_dbg("Error while checking for link\n");
+ return ret_val;
+ }
+ mac->autoneg_failed = false;
+ } else {
+ mac->autoneg_failed = false;
+ e_dbg("Valid Link Found\n");
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
+ */
+ e_dbg("Auto-negotiation enabled\n");
+
+ ew32(CTRL, ctrl);
+ e1e_flush();
+ usleep_range(1000, 2000);
+
+ /* For these adapters, the SW definable pin 1 is set when the optics
+ * detect a signal. If we have a signal, then poll for a "Link-Up"
+ * indication.
+ */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ } else {
+ e_dbg("No signal detected\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ tctl = er32(TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ ew32(TCTL, tctl);
+ e1e_flush();
+}
+
+/**
+ * e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & e1000_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= E1000_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ ew32(FCRTL, fcrtl);
+ ew32(FCRTH, fcrth);
+
+ return 0;
+}
+
+/**
+ * e1000e_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+ e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return 0;
+}
+
+/**
+ * e1000e_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000e_force_mac_fc(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = e1000e_force_mac_fc(hw);
+ }
+
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
+ e_dbg("Copper PHY and Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ e_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ e_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000e_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = er32(PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ e_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = er32(PCS_ANADV);
+ pcs_lp_ability_reg = er32(PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ e_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ e_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = er32(PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ ew32(PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000e_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
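The PAUSE-resolution table is identical in the copper and SerDes branches above; only the register source differs. The same logic as a standalone sketch, with resolve_fc() a hypothetical name and the arguments booleans decoded from the local advertisement and link-partner ability registers:

	static enum e1000_fc_mode resolve_fc(bool loc_pause, bool loc_asm,
					     bool lp_pause, bool lp_asm,
					     enum e1000_fc_mode requested)
	{
		/* Both PAUSE bits set: symmetric flow control, demoted to
		 * Rx-only when the user requested Rx-only.
		 */
		if (loc_pause && lp_pause)
			return (requested == e1000_fc_full) ?
			    e1000_fc_full : e1000_fc_rx_pause;
		if (!loc_pause && loc_asm && lp_pause && lp_asm)
			return e1000_fc_tx_pause;
		if (loc_pause && loc_asm && !lp_pause && lp_asm)
			return e1000_fc_rx_pause;
		return e1000_fc_none;	/* per IEEE 802.3, disable otherwise */
	}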
+
+/**
+ * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ if (status & E1000_STATUS_FD)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
+
+ return 0;
+}
+
+/**
+ * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ **/
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
+ *hw, u16 *speed, u16 *duplex)
+{
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+
+ return 0;
+}
+
+/**
+ * e1000e_get_hw_semaphore - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
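+	/* The timeout scales with the NVM size, presumably to allow
+	 * firmware to finish a full NVM access while holding the semaphore
+	 */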
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = er32(SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usleep_range(50, 100);
+ i++;
+ }
+
+ if (i == timeout) {
+ e_dbg("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (er32(SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usleep_range(50, 100);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000e_put_hw_semaphore(hw);
+ e_dbg("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_put_hw_semaphore - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000e_put_hw_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = er32(SWSM);
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ ew32(SWSM, swsm);
+}
+
+/**
+ * e1000e_get_auto_rd_done - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
+{
+ s32 i = 0;
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (er32(EECD) & E1000_EECD_AUTO_RD)
+ break;
+ usleep_range(1000, 2000);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ e_dbg("Auto read by HW from NVM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_valid_led_default - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+
+ return 0;
+}
+
+/**
+ * e1000e_id_led_init_generic - Initialize the LED ID settings
+ * @hw: pointer to the HW structure
+ *
+ * Reads the default LED configuration from the NVM and builds the LEDCTL
+ * values later used to turn the software-controllable LEDs on and off.
+ **/
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
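+		/* Each LED's config occupies one 4-bit nibble of the NVM word */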
+ temp = (data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000e_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_cleanup_led_generic - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
+{
+ ew32(LEDCTL, hw->mac.ledctl_default);
+ return 0;
+}
+
+/**
+ * e1000e_blink_led_generic - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the LEDs which are set to be on.
+ **/
+s32 e1000e_blink_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl_blink = 0;
+ u32 i;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
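+		/* LEDCTL holds four 8-bit per-LED config fields */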
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
+ }
+
+ ew32(LEDCTL, ledctl_blink);
+
+ return 0;
+}
+
+/**
+ * e1000e_led_on_generic - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+s32 e1000e_led_on_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ ew32(CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ ew32(LEDCTL, hw->mac.ledctl_mode2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_led_off_generic - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 e1000e_led_off_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ ew32(CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
+ * @hw: pointer to the HW structure
+ * @no_snoop: bitmap of no-snoop events
+ *
+ * Set the PCI-Express no-snoop bits in the GCR register for the events
+ * enabled in 'no_snoop'.
+ **/
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr;
+
+ if (no_snoop) {
+ gcr = er32(GCR);
+ gcr &= ~(PCIE_NO_SNOOP_ALL);
+ gcr |= no_snoop;
+ ew32(GCR, gcr);
+ }
+}
+
+/**
+ * e1000e_disable_pcie_master - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 if successful, else returns -E1000_ERR_MASTER_REQUESTS_PENDING
+ * (-10) if setting the master disable bit has not caused the master
+ * requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ ew32(CTRL, ctrl);
+
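+	/* Poll until hardware clears GIO Master Enable, showing all
+	 * outstanding master requests have drained
+	 */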
+ while (timeout) {
+ if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+ break;
+ usleep_range(100, 200);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000e_reset_adaptive(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ mac->current_ifs_val = 0;
+ mac->ifs_min_val = IFS_MIN;
+ mac->ifs_max_val = IFS_MAX;
+ mac->ifs_step_size = IFS_STEP;
+ mac->ifs_ratio = IFS_RATIO;
+
+ mac->in_ifs_mode = false;
+ ew32(AIT, 0);
+}
+
+/**
+ * e1000e_update_adaptive - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000e_update_adaptive(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+ mac->in_ifs_mode = true;
+ if (mac->current_ifs_val < mac->ifs_max_val) {
+ if (!mac->current_ifs_val)
+ mac->current_ifs_val = mac->ifs_min_val;
+ else
+ mac->current_ifs_val +=
+ mac->ifs_step_size;
+ ew32(AIT, mac->current_ifs_val);
+ }
+ }
+ } else {
+ if (mac->in_ifs_mode &&
+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+ mac->current_ifs_val = 0;
+ mac->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+}
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/mac.h b/kernel/drivers/net/ethernet/intel/e1000e/mac.h
new file mode 100644
index 000000000..0513d90cd
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/mac.h
@@ -0,0 +1,68 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_MAC_H_
+#define _E1000E_MAC_H_
+
+s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000e_led_on_generic(struct e1000_hw *hw);
+s32 e1000e_led_off_generic(struct e1000_hw *hw);
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+s32 e1000e_setup_link_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000e_reset_adaptive(struct e1000_hw *hw);
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+void e1000e_update_adaptive(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/manage.c b/kernel/drivers/net/ethernet/intel/e1000e/manage.c
new file mode 100644
index 000000000..06edfca1a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/manage.c
@@ -0,0 +1,347 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum of the given buffer over the specified length and
+ * returns it.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
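+	/* Return the two's complement so the bytes plus checksum sum to zero */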
+ return (u8)(0 - sum);
+}
+
+/**
+ * e1000_mng_enable_host_if - Checks that the host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command has completed. It busy-waits
+ * while the previous command is still in progress.
+ **/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ if (!hw->mac.arc_subsystem_valid) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = er32(HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ e_dbg("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = er32(HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ mdelay(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ e_dbg("Previous command timeout failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_check_mng_mode_generic - Generic check management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = er32(FWSM);
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF,
+ offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /* If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+ hw->mac.tx_pkt_filtering = false;
+
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Calculates the checksum and then writes the command header.
+ **/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
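+	/* Convert the header length from bytes to dwords */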
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i));
+ e1e_flush();
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_mng_host_if_write - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer contents at the given offset on the host
+ * interface. It handles alignment so the writes are done in the most
+ * efficient way, and it accumulates the sum of the data in the 'sum'
+ * parameter.
+ **/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+	/* 'sum' accumulates only the data bytes; it is not the checksum itself */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
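+	/* An unaligned start needs a read-modify-write of the first dword */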
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = er32(HICR);
+ ew32(HICR, hicr | E1000_HICR_C);
+
+ return 0;
+}
+
+/**
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies whether the hardware needs to leave the interface enabled so that
+ * frames can be directed to and from the management interface.
+ **/
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = er32(FWSM);
+ factps = er32(FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
+ return true;
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
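+		/* 82574/82583 keep the manageability mode in NVM Init
+		 * Control Word 2 rather than in the FWSM register
+		 */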
+ u16 data;
+ s32 ret_val;
+
+ factps = er32(FACTPS);
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val)
+ return false;
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13)))
+ return true;
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ return true;
+ }
+
+ return false;
+}
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/manage.h b/kernel/drivers/net/ethernet/intel/e1000e/manage.h
new file mode 100644
index 000000000..a8c27f98f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/manage.h
@@ -0,0 +1,65 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_MANAGE_H_
+#define _E1000E_MANAGE_H_
+
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/netdev.c b/kernel/drivers/net/ethernet/intel/e1000e/netdev.c
new file mode 100644
index 000000000..c509a5c90
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -0,0 +1,7316 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+
+#include "e1000.h"
+
+#define DRV_EXTRAVERSION "-k"
+
+#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+char e1000e_driver_name[] = "e1000e";
+const char e1000e_driver_version[] = DRV_VERSION;
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const struct e1000_info *e1000_info_tbl[] = {
+ [board_82571] = &e1000_82571_info,
+ [board_82572] = &e1000_82572_info,
+ [board_82573] = &e1000_82573_info,
+ [board_82574] = &e1000_82574_info,
+ [board_82583] = &e1000_82583_info,
+ [board_80003es2lan] = &e1000_es2_info,
+ [board_ich8lan] = &e1000_ich8_info,
+ [board_ich9lan] = &e1000_ich9_info,
+ [board_ich10lan] = &e1000_ich10_info,
+ [board_pchlan] = &e1000_pch_info,
+ [board_pch2lan] = &e1000_pch2_info,
+ [board_pch_lpt] = &e1000_pch_lpt_info,
+ [board_pch_spt] = &e1000_pch_spt_info,
+};
+
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* Rx Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* Tx Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {0, NULL}
+};
+
+/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time. Normally, this is handled in
+ * h/w by an arbiter, but on some parts there is a bug that acknowledges Host
+ * accesses later than it should, which could leave the register with an
+ * incorrect value. Work around this by checking the FWSM register, which
+ * has bit 24 set while the ME is accessing MAC CSR registers; if it is set,
+ * wait and try again a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+ s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+ while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+ udelay(50);
+
+ return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ __ew32_prepare(hw);
+
+ writel(val, hw->hw_addr + reg);
+}
+
+/**
+ * e1000_regdump - register printout routine
+ * @hw: pointer to the HW structure
+ * @reginfo: pointer to the register info table
+ **/
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ pr_info("%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
+}
+
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+ struct e1000_buffer *bi)
+{
+ int i;
+ struct e1000_ps_page *ps_page;
+
+ for (i = 0; i < adapter->rx_ps_pages; i++) {
+ ps_page = &bi->ps_pages[i];
+
+ if (ps_page->page) {
+ pr_info("packet dump for ps_page %d:\n", i);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, page_address(ps_page->page),
+ PAGE_SIZE, true);
+ }
+ }
+}
+
+/**
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
+ * @adapter: board private structure
+ **/
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 {
+ __le64 a;
+ __le64 b;
+ } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ union e1000_rx_desc_extended *rx_desc;
+ struct my_u1 {
+ __le64 a;
+ __le64 b;
+ __le64 c;
+ __le64 d;
+ } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name state trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print Tx Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
+
+ /* Print Tx Ring */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
+ pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
+ pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
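+		/* DEXT (bit 29) selects legacy vs extended descriptors;
+		 * bit 20 of DTYP selects data vs context
+		 */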
+ pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
+
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ buffer_info->skb->len, true);
+ }
+
+ /* Print Rx Ring Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
+ pr_info("Queue [NTU] [NTC]\n");
+ pr_info(" %5d %5X %5X\n",
+ 0, rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print Rx Ring */
+ if (!netif_msg_rx_status(adapter))
+ return;
+
+ dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb, next_desc);
+ } else {
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
+
+ if (netif_msg_pktdata(adapter))
+ e1000e_dump_ps_pages(adapter,
+ buffer_info);
+ }
+ }
+ break;
+ default:
+ case 0:
+ /* Extended Receive Descriptor (Read) Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Reserved |
+ * +-----------------------------------------------------+
+ */
+ pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
+ /* Extended Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 24 23 4 3 0
+ * +------------------------------------------------------+
+ * | RSS Hash | | | |
+ * 0 +-------------------+ Rsvd | Reserved | MRQ RSS |
+ * | Packet | IP | | | Type |
+ * | Checksum | Ident | | | |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb, next_desc);
+ } else {
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
+
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->skb)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16,
+ 1,
+ buffer_info->skb->data,
+ adapter->rx_buffer_len,
+ true);
+ }
+ }
+ }
+}
+
+/**
+ * e1000_desc_unused - calculate the number of unused descriptors
+ * @ring: pointer to the descriptor ring
+ **/
+static int e1000_desc_unused(struct e1000_ring *ring)
+{
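+	/* One slot is always left unused so a full ring can be
+	 * distinguished from an empty one
+	 */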
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
+/**
+ * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
+ * @adapter: board private structure
+ * @hwtstamps: time stamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * Convert the system time value stored in the RX/TXSTMP registers into a
+ * hwtstamp which can be used by the upper level time stamping functions.
+ *
+ * The 'systim_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two 32 bit registers. The first read latches the
+ * value.
+ **/
+static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
+ * @adapter: board private structure
+ * @status: descriptor extended error and status field
+ * @skb: particular skb to include time stamp
+ *
+ * If the time stamp is valid, convert it into the timecounter ns value
+ * and store that result into the shhwtstamps structure which is passed
+ * up the network stack.
+ **/
+static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u64 rxstmp;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
+ !(status & E1000_RXDEXT_STATERR_TST) ||
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ /* The Rx time stamp registers contain the time stamp. No other
+ * received packet will be time stamped until the Rx time stamp
+ * registers are read. Because only one packet can be time stamped
+ * at a time, the register values must belong to this packet and
+ * therefore none of the other additional attributes need to be
+ * compared.
+ */
+ rxstmp = (u64)er32(RXSTMPL);
+ rxstmp |= (u64)er32(RXSTMPH) << 32;
+ e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
+
+ adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle Rx indications
+ * @adapter: board private structure
+ * @netdev: pointer to the net device structure
+ * @skb: pointer to sk_buff to be indicated to stack
+ * @staterr: descriptor extended error and status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ **/
+static void e1000_receive_skb(struct e1000_adapter *adapter,
+ struct net_device *netdev, struct sk_buff *skb,
+ u32 staterr, __le16 vlan)
+{
+ u16 tag = le16_to_cpu(vlan);
+
+ e1000e_rx_hwtstamp(adapter, staterr, skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (staterr & E1000_RXD_STAT_VP)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
+
+ napi_gro_receive(&adapter->napi, skb);
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @skb: socket buffer with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ struct sk_buff *skb)
+{
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+
+ skb_checksum_none_assert(skb);
+
+ /* Rx checksum disabled */
+ if (!(adapter->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* Ignore Checksum bit is set */
+ if (status & E1000_RXD_STAT_IXSM)
+ return;
+
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+
+ /* TCP/UDP Checksum has not been calculated */
+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+ return;
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val = __ew32_prepare(hw);
+
+ writel(i, rx_ring->tail);
+
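+	/* If the ME wait timed out and the tail readback mismatches,
+	 * the ME clobbered the write
+	 */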
+ if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
+ u32 rctl = er32(RCTL);
+
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e_err("ME firmware caused invalid RDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val = __ew32_prepare(hw);
+
+ writel(i, tx_ring->tail);
+
+ if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
+ u32 tctl = er32(TCTL);
+
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ e_err("ME firmware caused invalid TDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: number of buffers to allocate this pass
+ * @gfp: allocation flags for buffer memory
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
+ int cleaned_count, gfp_t gfp)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_extended *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = adapter->rx_buffer_len;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
+ if (!skb) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+map_skb:
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
+ adapter->rx_dma_failed++;
+ break;
+ }
+
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, i);
+ else
+ writel(i, rx_ring->tail);
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: number of buffers to allocate this pass
+ * @gfp: allocation flags for buffer memory
+ **/
+static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
+ int cleaned_count, gfp_t gfp)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_packet_split *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ps_page *ps_page;
+ struct sk_buff *skb;
+ unsigned int i, j;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (j >= adapter->rx_ps_pages) {
+ /* all unused desc entries get hw null ptr */
+ rx_desc->read.buffer_addr[j + 1] =
+ ~cpu_to_le64(0);
+ continue;
+ }
+ if (!ps_page->page) {
+ ps_page->page = alloc_page(gfp);
+ if (!ps_page->page) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
+ dev_err(&adapter->pdev->dev,
+ "Rx DMA page map failed\n");
+ adapter->rx_dma_failed++;
+ goto no_buffers;
+ }
+ }
+ /* Refresh the desc even if buffer_addrs
+ * didn't change because each write-back
+ * erases this info.
+ */
+ rx_desc->read.buffer_addr[j + 1] =
+ cpu_to_le64(ps_page->dma);
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
+ gfp);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ adapter->rx_ps_bsize0,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
+ adapter->rx_dma_failed++;
+ /* cleanup skb */
+ dev_kfree_skb_any(skb);
+ buffer_info->skb = NULL;
+ break;
+ }
+
+ rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
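+			/* Packet-split descriptors are twice the size of
+			 * legacy ones, so the tail index is doubled
+			 */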
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, i << 1);
+ else
+ writel(i << 1, rx_ring->tail);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+no_buffers:
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: number of buffers to allocate this pass
+ * @gfp: allocation flags for buffer memory
+ **/
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
+ int cleaned_count, gfp_t gfp)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_extended *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto check_page;
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+check_page:
+ /* allocate a new page if necessary */
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(gfp);
+ if (unlikely(!buffer_info->page)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ if (!buffer_info->dma) {
+ buffer_info->dma = dma_map_page(&pdev->dev,
+ buffer_info->page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, i);
+ else
+ writel(i, rx_ring->tail);
+ }
+}
+
+static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
+ struct sk_buff *skb)
+{
+ if (netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack
+ * @rx_ring: Rx descriptor ring
+ * @work_done: output count of descriptors cleaned this pass
+ * @work_to_do: maximum amount of work allowed this pass (NAPI budget)
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
+ int work_to_do)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ union e1000_rx_desc_extended *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length, staterr;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len, DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ /* !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ /* All receives must fit into a single buffer */
+ e_dbg("Receive packet consumed multiple buffers\n");
+ /* recycle */
+ buffer_info->skb = skb;
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+ goto next_desc;
+ }
+
+ if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL))) {
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+
+ /* adjust length to remove Ethernet CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
+ /* If configured to store CRC, don't subtract FCS,
+ * but keep the FCS bytes out of the total_rx_bytes
+ * counter
+ */
+ if (netdev->features & NETIF_F_RXFCS)
+ total_rx_bytes -= 4;
+ else
+ length -= 4;
+ }
+
+ total_rx_bytes += length;
+ total_rx_packets++;
+
+ /* code added for copybreak; this should improve performance
+ * for small packets with large amounts of reassembly being
+ * done in the stack
+ */
+ if (length < copybreak) {
+ struct sk_buff *new_skb =
+ napi_alloc_skb(&adapter->napi, length);
+ if (new_skb) {
+ skb_copy_to_linear_data_offset(new_skb,
+ -NET_IP_ALIGN,
+ (skb->data -
+ NET_IP_ALIGN),
+ (length +
+ NET_IP_ALIGN));
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ /* else just continue with the old one */
+ }
+ /* end copybreak code */
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+ adapter->alloc_rx_buf(rx_ring, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
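+
+/* Illustrative sketch (not part of the driver): the copybreak decision in
+ * miniature. For frames shorter than the copybreak threshold it is cheaper
+ * to memcpy the data into a small freshly allocated skb and recycle the
+ * original DMA buffer than to hand the full-sized buffer up the stack.
+ * The helper name is hypothetical.
+ */
+static inline bool e1000_sketch_should_copybreak(u32 length, u32 threshold)
+{
+ /* copy small frames, flip large ones to the stack as-is */
+ return length < threshold;
+}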
+
+static void e1000_put_txbuf(struct e1000_ring *tx_ring,
+ struct e1000_buffer *buffer_info)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+}
+
+static void e1000_print_hw_hang(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ print_hang_task);
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int i = tx_ring->next_to_clean;
+ unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
+ struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_status, phy_1000t_status, phy_ext_status;
+ u16 pci_status;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
+ /* May be blocked on write-back; flush pending descriptor
+ * writebacks to memory and detect again
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ /* execute the writes immediately */
+ e1e_flush();
+ /* Due to rare timing issues, write to TIDV again to ensure
+ * the write is successful
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ /* execute the writes immediately */
+ e1e_flush();
+ adapter->tx_hang_recheck = true;
+ return;
+ }
+ adapter->tx_hang_recheck = false;
+
+ if (er32(TDH(0)) == er32(TDT(0))) {
+ e_dbg("false hang detected, ignoring\n");
+ return;
+ }
+
+ /* Real hang detected */
+ netif_stop_queue(netdev);
+
+ e1e_rphy(hw, MII_BMSR, &phy_status);
+ e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
+ e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
+
+ pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+ /* detected Hardware unit hang */
+ e_err("Detected Hardware Unit Hang:\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]:\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n"
+ "MAC Status <%x>\n"
+ "PHY Status <%x>\n"
+ "PHY 1000BASE-T Status <%x>\n"
+ "PHY Extended Status <%x>\n"
+ "PCI Status <%x>\n",
+ readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
+ tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
+ eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
+ phy_status, phy_1000t_status, phy_ext_status, pci_status);
+
+ e1000e_dump(adapter);
+
+ /* Suggest workaround for known h/w issue */
+ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
+ e_err("Try turning off Tx pause (flow control) via ethtool\n");
+}
+
+/**
+ * e1000e_tx_hwtstamp_work - check for Tx time stamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb. The timestamp must
+ * be for this skb because only one such packet is allowed in the queue.
+ */
+static void e1000e_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ tx_hwtstamp_work);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 txstmp;
+
+ txstmp = er32(TXSTMPL);
+ txstmp |= (u64)er32(TXSTMPH) << 32;
+
+ e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
+
+ skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ } else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ + adapter->tx_timeout_factor * HZ)) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn("clearing Tx timestamp hang\n");
+ } else {
+ /* reschedule to check later */
+ schedule_work(&adapter->tx_hwtstamp_work);
+ }
+}
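+
+/* Illustrative sketch (not part of the driver): how the 64-bit Tx time
+ * stamp above is assembled from the two 32-bit TXSTMPL/TXSTMPH reads,
+ * low half first as in e1000e_tx_hwtstamp_work(). The helper name is
+ * hypothetical.
+ */
+static inline u64 e1000_sketch_combine_stamp(u32 lo, u32 hi)
+{
+ return ((u64)hi << 32) | lo;
+}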
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: Tx descriptor ring
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ unsigned int count = 0;
+ unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ bool cleaned = false;
+
+ dma_rmb(); /* read buffer_info after eop_desc */
+ for (; !cleaned; count++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+ if (cleaned) {
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
+ if (buffer_info->skb) {
+ bytes_compl += buffer_info->skb->len;
+ pkts_compl++;
+ }
+ }
+
+ e1000_put_txbuf(tx_ring, buffer_info);
+ tx_desc->upper.data = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ if (i == tx_ring->next_to_use)
+ break;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
+#define TX_WAKE_THRESHOLD 32
+ if (count && netif_carrier_ok(netdev) &&
+ e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__E1000_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (adapter->detect_tx_hung) {
+ /* Detect a transmit hang in hardware; this serializes the
+ * check with the clearing of time_stamp and the movement of i
+ */
+ adapter->detect_tx_hung = false;
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ + (adapter->tx_timeout_factor * HZ)) &&
+ !(er32(STATUS) & E1000_STATUS_TXOFF))
+ schedule_work(&adapter->print_hang_task);
+ else
+ adapter->tx_hang_recheck = false;
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+ return count < tx_ring->count;
+}
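+
+/* Illustrative sketch (not part of the driver): the free-descriptor count
+ * that gates the TX_WAKE_THRESHOLD test above. In a circular ring the
+ * number of unused entries is the distance from next_to_use back around to
+ * next_to_clean, minus the one slot kept empty to tell a full ring from an
+ * empty one. The helper name is hypothetical; e1000_desc_unused() is the
+ * driver's real equivalent.
+ */
+static inline unsigned int e1000_sketch_ring_unused(unsigned int ntc,
+ unsigned int ntu,
+ unsigned int count)
+{
+ if (ntc > ntu)
+ return ntc - ntu - 1;
+ return count + ntc - ntu - 1;
+}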
+
+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @rx_ring: Rx descriptor ring
+ * @work_done: output; incremented for each packet processed
+ * @work_to_do: max number of packets to process
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
+ int work_to_do)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ struct e1000_ps_page *ps_page;
+ struct sk_buff *skb;
+ unsigned int i, j;
+ u32 length, staterr;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ skb = buffer_info->skb;
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ /* in the packet split case this is header only */
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ /* see !EOP comment in other Rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ e_dbg("Packet Split buffers didn't pick up the full packet\n");
+ dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+ goto next_desc;
+ }
+
+ if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL))) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+ if (!length) {
+ e_dbg("Last part of the packet spanning multiple descriptors\n");
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ /* Good Receive */
+ skb_put(skb, length);
+
+ {
+ /* this looks ugly, but it seems compiler issues make
+ * it more efficient than reusing j
+ */
+ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
+
+ /* page alloc/put takes too long and affects small
+ * packet throughput, so unsplit small packets and
+ * save the alloc/put; only valid in softirq (napi)
+ * context to call kmap_*
+ */
+ if (l1 && (l1 <= copybreak) &&
+ ((length + l1) <= adapter->rx_ps_bsize0)) {
+ u8 *vaddr;
+
+ ps_page = &buffer_info->ps_pages[0];
+
+ /* there is no documentation about how to call
+ * kmap_atomic, so we can't hold the mapping
+ * very long
+ */
+ dma_sync_single_for_cpu(&pdev->dev,
+ ps_page->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ vaddr = kmap_atomic(ps_page->page);
+ memcpy(skb_tail_pointer(skb), vaddr, l1);
+ kunmap_atomic(vaddr);
+ dma_sync_single_for_device(&pdev->dev,
+ ps_page->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* remove the CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
+ if (!(netdev->features & NETIF_F_RXFCS))
+ l1 -= 4;
+ }
+
+ skb_put(skb, l1);
+ goto copydone;
+ } /* if */
+ }
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ length = le16_to_cpu(rx_desc->wb.upper.length[j]);
+ if (!length)
+ break;
+
+ ps_page = &buffer_info->ps_pages[j];
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ps_page->dma = 0;
+ skb_fill_page_desc(skb, j, ps_page->page, 0, length);
+ ps_page->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += PAGE_SIZE;
+ }
+
+ /* strip the ethernet crc, problem is we're using pages now so
+ * this whole operation can get a little cpu intensive
+ */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
+ if (!(netdev->features & NETIF_F_RXFCS))
+ pskb_trim(skb, skb->len - 4);
+ }
+
+copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ e1000_rx_checksum(adapter, staterr, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
+
+ if (rx_desc->wb.upper.header_status &
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+ adapter->rx_hdr_split++;
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.middle.vlan);
+
+next_desc:
+ rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
+ buffer_info->skb = NULL;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+ adapter->alloc_rx_buf(rx_ring, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_consume_page - account a page-sized fragment to an skb
+ * @bi: buffer that owned the page
+ * @skb: skb the page data was added to
+ * @length: number of bytes consumed from the page
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+ u16 length)
+{
+ bi->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += PAGE_SIZE;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @rx_ring: Rx descriptor ring
+ * @work_done: output; incremented for each packet processed
+ * @work_to_do: max number of packets to process
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
+ int work_to_do)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_extended *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length, staterr;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct skb_shared_info *shinfo;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ ++i;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ /* the error bits are only valid for DD + EOP descriptors */
+ if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
+ ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
+ /* recycle both page and skb */
+ buffer_info->skb = skb;
+ /* an error means any chain goes out the window too */
+ if (rx_ring->rx_skb_top)
+ dev_kfree_skb_irq(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ goto next_desc;
+ }
+#define rxtop (rx_ring->rx_skb_top)
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* this descriptor is only the beginning (or middle) */
+ if (!rxtop) {
+ /* this is the beginning of a chain */
+ rxtop = skb;
+ skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ 0, length);
+ } else {
+ /* this is the middle of a chain */
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
+ /* re-use the skb, only consumed the page */
+ buffer_info->skb = skb;
+ }
+ e1000_consume_page(buffer_info, rxtop, length);
+ goto next_desc;
+ } else {
+ if (rxtop) {
+ /* end of the chain */
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
+ /* re-use the current skb, we only consumed the
+ * page
+ */
+ buffer_info->skb = skb;
+ skb = rxtop;
+ rxtop = NULL;
+ e1000_consume_page(buffer_info, skb, length);
+ } else {
+ /* no chain, got EOP, this buf is the packet
+ * copybreak to save the put_page/alloc_page
+ */
+ if (length <= copybreak &&
+ skb_tailroom(skb) >= length) {
+ u8 *vaddr;
+ vaddr = kmap_atomic(buffer_info->page);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
+ kunmap_atomic(vaddr);
+ /* re-use the page, so don't erase
+ * buffer_info->page
+ */
+ skb_put(skb, length);
+ } else {
+ skb_fill_page_desc(skb, 0,
+ buffer_info->page, 0,
+ length);
+ e1000_consume_page(buffer_info, skb,
+ length);
+ }
+ }
+ }
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ e_err("pskb_may_pull failed.\n");
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(rx_ring, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: Rx descriptor ring
+ **/
+static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ps_page *ps_page;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int i, j;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ if (adapter->clean_rx == e1000_clean_rx_irq)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_bsize0,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (!ps_page->page)
+ break;
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ps_page->dma = 0;
+ put_page(ps_page->page);
+ ps_page->page = NULL;
+ }
+ }
+
+ /* there also may be some cached data from a chained receive */
+ if (rx_ring->rx_skb_top) {
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ }
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+
+ writel(0, rx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, 0);
+ else
+ writel(0, rx_ring->tail);
+}
+
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ downshift_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ /* read ICR disables interrupts using IAM */
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = true;
+ /* ICH8 workaround-- Call gig speed drop workaround on cable
+ * disconnect (LSC) before accessing any PHY registers
+ */
+ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+ (!(er32(STATUS) & E1000_STATUS_LU)))
+ schedule_work(&adapter->downshift_task);
+
+ /* 80003ES2LAN workaround-- For packet buffer work-around on
+ * link down event; disable receives here in the ISR and reset
+ * adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ adapter->flags & FLAG_RX_NEEDS_RESTART) {
+ /* disable receives */
+ u32 rctl = er32(RCTL);
+
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ adapter->flags |= FLAG_RESTART_NOW;
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt))) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+
+ return IRQ_HANDLED;
+}
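+
+/* Illustrative sketch (not part of the driver): the PBECCSTS decode used
+ * in the ECC path above. The correctable count sits in the low bits and
+ * the uncorrectable count in a higher field, so each is recovered with a
+ * mask and, for the upper field, a shift. The helper name is hypothetical.
+ */
+static inline void e1000_sketch_decode_pbeccsts(u32 pbeccsts, u32 *corr,
+ u32 *uncorr)
+{
+ *corr = pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ *uncorr = (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+}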
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, icr = er32(ICR);
+
+ if (!icr || test_bit(__E1000_DOWN, &adapter->state))
+ return IRQ_NONE; /* Not our interrupt */
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt
+ */
+ if (!(icr & E1000_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ /* Interrupt Auto-Mask...upon reading ICR,
+ * interrupts are masked. No need for the
+ * IMC write
+ */
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = true;
+ /* ICH8 workaround-- Call gig speed drop workaround on cable
+ * disconnect (LSC) before accessing any PHY registers
+ */
+ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+ (!(er32(STATUS) & E1000_STATUS_LU)))
+ schedule_work(&adapter->downshift_task);
+
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
+ /* disable receives */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ adapter->flags |= FLAG_RESTART_NOW;
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt))) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ if (!(icr & E1000_ICR_INT_ASSERTED)) {
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_OTHER);
+ return IRQ_NONE;
+ }
+
+ if (icr & adapter->eiac_mask)
+ ew32(ICS, (icr & adapter->eiac_mask));
+
+ if (icr & E1000_ICR_OTHER) {
+ if (!(icr & E1000_ICR_LSC))
+ goto no_link_interrupt;
+ hw->mac.get_link_status = true;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+no_link_interrupt:
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+
+ if (!e1000_clean_tx_irq(tx_ring))
+ /* Ring was not completely cleaned, so fire another interrupt */
+ ew32(ICS, tx_ring->ims_val);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+
+ /* Write the ITR value calculated at the end of the
+ * previous interrupt.
+ */
+ if (rx_ring->set_itr) {
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ rx_ring->itr_register);
+ rx_ring->set_itr = 0;
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * e1000_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void e1000_configure_msix(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ int vector = 0;
+ u32 ctrl_ext, ivar = 0;
+
+ adapter->eiac_mask = 0;
+
+ /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
+ if (hw->mac.type == e1000_82574) {
+ u32 rfctl = er32(RFCTL);
+
+ rfctl |= E1000_RFCTL_ACK_DIS;
+ ew32(RFCTL, rfctl);
+ }
+
+ /* Configure Rx vector */
+ rx_ring->ims_val = E1000_IMS_RXQ0;
+ adapter->eiac_mask |= rx_ring->ims_val;
+ if (rx_ring->itr_val)
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ rx_ring->itr_register);
+ else
+ writel(1, rx_ring->itr_register);
+ ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
+
+ /* Configure Tx vector */
+ tx_ring->ims_val = E1000_IMS_TXQ0;
+ vector++;
+ if (tx_ring->itr_val)
+ writel(1000000000 / (tx_ring->itr_val * 256),
+ tx_ring->itr_register);
+ else
+ writel(1, tx_ring->itr_register);
+ adapter->eiac_mask |= tx_ring->ims_val;
+ ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
+
+ /* set vector for Other Causes, e.g. link changes */
+ vector++;
+ ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
+ if (rx_ring->itr_val)
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ hw->hw_addr + E1000_EITR_82574(vector));
+ else
+ writel(1, hw->hw_addr + E1000_EITR_82574(vector));
+
+ /* Cause Tx interrupts on every write back */
+ ivar |= (1 << 31);
+
+ ew32(IVAR, ivar);
+
+ /* enable MSI-X PBA support */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask Other interrupts upon ICR read */
+ ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
+ ctrl_ext |= E1000_CTRL_EXT_EIAME;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+}
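+
+/* Illustrative sketch (not part of the driver): the 1000000000 / (itr * 256)
+ * expression used above converts an interrupt rate in ints/sec into the
+ * EITR interval, which counts in 256 ns units. 20000 ints/s, for example,
+ * maps to 1e9 / (20000 * 256) ~= 195. The helper name is hypothetical.
+ */
+static inline u32 e1000_sketch_itr_to_eitr(u32 ints_per_sec)
+{
+ if (!ints_per_sec)
+ return 0;
+ return 1000000000 / (ints_per_sec * 256);
+}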
+
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & FLAG_MSI_ENABLED) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~FLAG_MSI_ENABLED;
+ }
+}
+
+/**
+ * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
+{
+ int err;
+ int i;
+
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ if (adapter->flags & FLAG_HAS_MSIX) {
+ adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+ adapter->msix_entries = kcalloc(adapter->num_vectors,
+ sizeof(struct
+ msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ struct e1000_adapter *a = adapter;
+
+ for (i = 0; i < adapter->num_vectors; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_range(a->pdev,
+ a->msix_entries,
+ a->num_vectors,
+ a->num_vectors);
+ if (err > 0)
+ return;
+ }
+ /* MSI-X failed, so fall through and try MSI */
+ e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
+ e1000e_reset_interrupt_capability(adapter);
+ }
+ adapter->int_mode = E1000E_INT_MODE_MSI;
+ /* Fall through */
+ case E1000E_INT_MODE_MSI:
+ if (!pci_enable_msi(adapter->pdev)) {
+ adapter->flags |= FLAG_MSI_ENABLED;
+ } else {
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
+ }
+ /* Fall through */
+ case E1000E_INT_MODE_LEGACY:
+ /* Don't do anything; this is the system default */
+ break;
+ }
+
+ /* store the number of vectors being used */
+ adapter->num_vectors = 1;
+}
+
+/**
+ * e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int e1000_request_msix(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0, vector = 0;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5))
+ snprintf(adapter->rx_ring->name,
+ sizeof(adapter->rx_ring->name) - 1,
+ "%s-rx-0", netdev->name);
+ else
+ memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+ netdev);
+ if (err)
+ return err;
+ adapter->rx_ring->itr_register = adapter->hw.hw_addr +
+ E1000_EITR_82574(vector);
+ adapter->rx_ring->itr_val = adapter->itr;
+ vector++;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5))
+ snprintf(adapter->tx_ring->name,
+ sizeof(adapter->tx_ring->name) - 1,
+ "%s-tx-0", netdev->name);
+ else
+ memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+ netdev);
+ if (err)
+ return err;
+ adapter->tx_ring->itr_register = adapter->hw.hw_addr +
+ E1000_EITR_82574(vector);
+ adapter->tx_ring->itr_val = adapter->itr;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_msix_other, 0, netdev->name, netdev);
+ if (err)
+ return err;
+
+ e1000_configure_msix(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (adapter->msix_entries) {
+ err = e1000_request_msix(adapter);
+ if (!err)
+ return err;
+ /* fall back to MSI */
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_MSI;
+ e1000e_set_interrupt_capability(adapter);
+ }
+ if (adapter->flags & FLAG_MSI_ENABLED) {
+ err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
+ netdev->name, netdev);
+ if (!err)
+ return err;
+
+ /* fall back to legacy interrupt */
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ }
+
+ err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
+ netdev->name, netdev);
+ if (err)
+ e_err("Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->msix_entries) {
+ int vector = 0;
+
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ vector++;
+
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ vector++;
+
+ /* Other Causes interrupt vector */
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ return;
+ }
+
+ free_irq(adapter->pdev->irq, netdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMC, ~0);
+ if (adapter->msix_entries)
+ ew32(EIAC_82574, 0);
+ e1e_flush();
+
+ if (adapter->msix_entries) {
+ int i;
+
+ for (i = 0; i < adapter->num_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
+ ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
+ } else {
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+ e1e_flush();
+}
+
+/**
+ * e1000e_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ **/
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+ u32 swsm;
+
+ /* Let firmware know the driver has taken over */
+ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+ } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/**
+ * e1000e_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+ u32 swsm;
+
+ /* Let firmware take over control of h/w */
+ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/**
+ * e1000_alloc_ring_dma - allocate memory for a ring structure
+ * @adapter: board private structure
+ * @ring: ring struct for which to allocate DMA memory
+ **/
+static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
+ struct e1000_ring *ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: Tx descriptor ring
+ *
+ * Return 0 on success, negative on failure
+ **/
+int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ int err = -ENOMEM, size;
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ err = e1000_alloc_ring_dma(adapter, tx_ring);
+ if (err)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+err:
+ vfree(tx_ring->buffer_info);
+ e_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return err;
+}
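+
+/* Illustrative sketch (not part of the driver): the "round up to nearest
+ * 4K" step above is the usual power-of-two ALIGN idiom. A ring of 256
+ * legacy descriptors (16 bytes each) is exactly 4096 bytes and stays put,
+ * while 257 descriptors (4112 bytes) would round up to 8192. The helper
+ * name is hypothetical.
+ */
+static inline unsigned long e1000_sketch_align_4k(unsigned long size)
+{
+ return (size + 4095UL) & ~4095UL;
+}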
+
+/**
+ * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: Rx descriptor ring
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct e1000_buffer *buffer_info;
+ int i, size, desc_len, err = -ENOMEM;
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info)
+ goto err;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+ sizeof(struct e1000_ps_page),
+ GFP_KERNEL);
+ if (!buffer_info->ps_pages)
+ goto err_pages;
+ }
+
+ desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ err = e1000_alloc_ring_dma(adapter, rx_ring);
+ if (err)
+ goto err_pages;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->rx_skb_top = NULL;
+
+ return 0;
+
+err_pages:
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ kfree(buffer_info->ps_pages);
+ }
+err:
+ vfree(rx_ring->buffer_info);
+ e_err("Unable to allocate memory for the receive descriptor ring\n");
+ return err;
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: Tx descriptor ring
+ **/
+static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_put_txbuf(tx_ring, buffer_info);
+ }
+
+ netdev_reset_queue(adapter->netdev);
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ writel(0, tx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(tx_ring, 0);
+ else
+ writel(0, tx_ring->tail);
+}
+
+/**
+ * e1000e_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring
+ *
+ * Free all transmit software resources
+ **/
+void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * e1000e_free_rx_resources - Free Rx Resources
+ * @rx_ring: Rx descriptor ring
+ *
+ * Free all receive software resources
+ **/
+void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
+{
+ struct e1000_adapter *adapter = rx_ring->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ e1000_clean_rx_ring(rx_ring);
+
+ for (i = 0; i < rx_ring->count; i++)
+ kfree(rx_ring->buffer_info[i].ps_pages);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @itr_setting: current latency classification (lowest, low or bulk)
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput. This functionality is controlled
+ * by the InterruptThrottleRate module parameter.
+ **/
+static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+
+ if (packets == 0)
+ return itr_setting;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes / packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes / packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes / packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes / packets > 2000) {
+ retval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ retval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+ return retval;
+}
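+
+/* Illustrative sketch (not part of the driver): two sample intervals run
+ * through the classifier above; the values are examples only.
+ */
+static inline void e1000_sketch_itr_examples(void)
+{
+ /* busy interval: 40 pkts / 60000 bytes while in bulk_latency takes
+ * the "bytes > 25000 && packets > 35" branch and moves to low_latency
+ */
+ unsigned int busy = e1000_update_itr(bulk_latency, 40, 60000);
+ /* quiet interval: zero packets leaves the setting unchanged */
+ unsigned int quiet = e1000_update_itr(low_latency, 0, 0);
+
+ (void)busy;
+ (void)quiet;
+}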
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (adapter->link_speed != SPEED_1000) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ new_itr = 0;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ /* counts and packets in update_itr are dependent on these numbers */
+ switch (current_itr) {
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
+ adapter->itr = new_itr;
+ adapter->rx_ring->itr_val = new_itr;
+ if (adapter->msix_entries)
+ adapter->rx_ring->set_itr = 1;
+ else
+ e1000e_write_itr(adapter, new_itr);
+ }
+}
+
+/**
+ * e1000e_write_itr - write the ITR value to the appropriate registers
+ * @adapter: address of board private structure
+ * @itr: new ITR value to program
+ *
+ * e1000e_write_itr determines if the adapter is in MSI-X mode
+ * and, if so, writes the EITR registers with the ITR value.
+ * Otherwise, it writes the ITR value into the ITR register.
+ **/
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
+
+ if (adapter->msix_entries) {
+ int vector;
+
+ for (vector = 0; vector < adapter->num_vectors; vector++)
+ writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
+ } else {
+ ew32(ITR, new_itr);
+ }
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ int size = sizeof(struct e1000_ring);
+
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err;
+ adapter->tx_ring->count = adapter->tx_ring_count;
+ adapter->tx_ring->adapter = adapter;
+
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err;
+ adapter->rx_ring->count = adapter->rx_ring_count;
+ adapter->rx_ring->adapter = adapter;
+
+ return 0;
+err:
+ e_err("Unable to allocate memory for queues\n");
+ kfree(adapter->rx_ring);
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+}
+
+/**
+ * e1000e_poll - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @weight: number of packets driver is allowed to process this poll
+ **/
+static int e1000e_poll(struct napi_struct *napi, int weight)
+{
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *poll_dev = adapter->netdev;
+ int tx_cleaned = 1, work_done = 0;
+
+ adapter = netdev_priv(poll_dev);
+
+ if (!adapter->msix_entries ||
+ (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+ tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
+
+ adapter->clean_rx(adapter->rx_ring, &work_done, weight);
+
+ if (!tx_cleaned)
+ work_done = weight;
+
+ /* If weight not fully consumed, exit the polling mode */
+ if (work_done < weight) {
+ if (adapter->itr_setting & 3)
+ e1000_set_itr(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ ew32(IMS, adapter->rx_ring->ims_val);
+ else
+ e1000_irq_enable(adapter);
+ }
+ }
+
+ return work_done;
+}
+
+static int e1000_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ /* don't update vlan cookie if already programmed */
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == adapter->mng_vlan_id))
+ return 0;
+
+ /* add VID to filter table */
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ hw->mac.ops.write_vfta(hw, index, vfta);
+ }
+
+ set_bit(vid, adapter->active_vlans);
+
+ return 0;
+}
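+
+/* Illustrative sketch (not part of the driver): the VFTA addressing math
+ * above. The 4096-entry VLAN filter table is laid out as 128 32-bit
+ * registers, so a VID selects register (vid >> 5) & 0x7F and bit
+ * (vid & 0x1F) within it; VID 100, for example, lands in register 3,
+ * bit 4. The helper name is hypothetical.
+ */
+static inline void e1000_sketch_vfta_slot(u16 vid, u32 *index, u32 *bit)
+{
+ *index = (vid >> 5) & 0x7F;
+ *bit = vid & 0x1F;
+}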
+
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == adapter->mng_vlan_id)) {
+ /* release control to f/w */
+ e1000e_release_hw_control(adapter);
+ return 0;
+ }
+
+ /* remove VID from filter table */
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ hw->mac.ops.write_vfta(hw, index, vfta);
+ }
+
+ clear_bit(vid, adapter->active_vlans);
+
+ return 0;
+}
+
+/**
+ * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* disable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
+ ew32(RCTL, rctl);
+
+ if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ }
+}
+
+/**
+ * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ ew32(RCTL, rctl);
+ }
+}
+
+/**
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ /* disable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ /* enable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
+ adapter->mng_vlan_id = vid;
+ }
+
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
+}
+
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 manc, manc2h, mdef, i, j;
+
+ if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
+ return;
+
+ manc = er32(MANC);
+
+ /* enable receiving management packets to the host. This will probably
+ * generate destination unreachable messages from the host OS, but
+ * the packets will be handled on SMBUS
+ */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h = er32(MANC2H);
+
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /* Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << 1);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
+ ew32(MANC2H, manc2h);
+ ew32(MANC, manc);
+}
+
+/**
+ * e1000_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ u64 tdba;
+ u32 tdlen, tctl, tarc;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = tx_ring->dma;
+ tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
+ ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+ ew32(TDBAH(0), (tdba >> 32));
+ ew32(TDLEN(0), tdlen);
+ ew32(TDH(0), 0);
+ ew32(TDT(0), 0);
+ tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
+ tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
+
+ /* Set the Tx Interrupt Delay register */
+ ew32(TIDV, adapter->tx_int_delay);
+ /* Tx irq moderation */
+ ew32(TADV, adapter->tx_abs_int_delay);
+
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ u32 txdctl = er32(TXDCTL(0));
+
+ txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
+ E1000_TXDCTL_WTHRESH);
+ /* set up some performance related parameters to encourage the
+ * hardware to use the bus more efficiently in bursts; this
+ * depends on tx_int_delay being enabled:
+ * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
+ * hthresh = 1 ==> prefetch when one or more descriptors are available
+ * pthresh = 0x1f ==> prefetch if the internal cache holds 31 or fewer
+ * BEWARE: this seems to work but should be considered first if
+ * there are Tx hangs or other Tx related bugs
+ */
+ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+ }
+ /* erratum work around: set txdctl the same for both queues */
+ ew32(TXDCTL(1), er32(TXDCTL(0)));
+
+ /* Program the Transmit Control Register */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
+ tarc = er32(TARC(0));
+ /* set the speed mode bit; we'll clear it if we're not at
+ * gigabit link later
+ */
+#define SPEED_MODE_BIT (1 << 21)
+ tarc |= SPEED_MODE_BIT;
+ ew32(TARC(0), tarc);
+ }
+
+ /* errata: program both queues to unweighted RR */
+ if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
+ tarc = er32(TARC(0));
+ tarc |= 1;
+ ew32(TARC(0), tarc);
+ tarc = er32(TARC(1));
+ tarc |= 1;
+ ew32(TARC(1), tarc);
+ }
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ /* enable Report Status bit */
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ ew32(TCTL, tctl);
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ /* SPT Si errata workaround to avoid data corruption */
+ if (hw->mac.type == e1000_pch_spt) {
+ u32 reg_val;
+
+ reg_val = er32(IOSFPC);
+ reg_val |= E1000_RCTL_RDMTS_HEX;
+ ew32(IOSFPC, reg_val);
+
+ reg_val = er32(TARC(0));
+ reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+ ew32(TARC(0), reg_val);
+ }
+}
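+
+/* Illustrative sketch (not part of the driver): how the 64-bit descriptor
+ * base above is split across the TDBAL/TDBAH register pair; the low write
+ * keeps the bottom 32 bits and the high write the top 32. The helper name
+ * is hypothetical.
+ */
+static inline void e1000_sketch_split_base(u64 dma, u32 *lo, u32 *hi)
+{
+ *lo = (u32)(dma & DMA_BIT_MASK(32));
+ *hi = (u32)(dma >> 32);
+}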
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, rfctl;
+ u32 pages = 0;
+
+ /* Workaround Si errata on PCHx - configure jumbo frame flow.
+ * If jumbo frames not set, program related MAC/PHY registers
+ * to h/w defaults
+ */
+ if (hw->mac.type >= e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable|disable jumbo frame workaround mode\n");
+ }
+
+ /* Program MC offset vector base */
+ rctl = er32(RCTL);
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Do not Store bad packets */
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Enable Long Packet receive */
+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
+ rctl &= ~E1000_RCTL_LPE;
+ else
+ rctl |= E1000_RCTL_LPE;
+
+ /* Some systems expect that the CRC is included in SMBUS traffic. The
+ * hardware strips the CRC before sending to both SMBUS (BMC) and to
+ * host memory when this is enabled
+ */
+ if (adapter->flags2 & FLAG2_CRC_STRIPPING)
+ rctl |= E1000_RCTL_SECRC;
+
+ /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+ if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+ u16 phy_data;
+
+ e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+ phy_data &= 0xfff8;
+ phy_data |= (1 << 2);
+ e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+ e1e_rphy(hw, 22, &phy_data);
+ phy_data &= 0x0fff;
+ phy_data |= (1 << 14);
+ e1e_wphy(hw, 0x10, 0x2823);
+ e1e_wphy(hw, 0x11, 0x0003);
+ e1e_wphy(hw, 22, phy_data);
+ }
+
+ /* Setup buffer sizes */
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case 2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case 4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case 8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case 16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ /* Enable Extended Status in all Receive Descriptors */
+ rfctl = er32(RFCTL);
+ rfctl |= E1000_RFCTL_EXTEN;
+ ew32(RFCTL, rfctl);
+
+ /* 82571 and greater support packet-split where the protocol
+ * header is placed in skb->data and the packet data is
+ * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+ * In the case of a non-split, skb->data is linearly filled,
+ * followed by the page buffers. Therefore, skb->data is
+ * sized to hold the largest protocol header.
+ *
+ * allocations using alloc_page take too long for regular MTU
+ * so only enable packet split for jumbo frames
+ *
+ * Using pages when the page size is greater than 16k wastes
+ * a lot of memory, since we allocate 3 pages at all times
+ * per packet.
+ */
+ pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+ if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+ adapter->rx_ps_pages = pages;
+ else
+ adapter->rx_ps_pages = 0;
+
+ if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
+ /* Enable Packet split descriptors */
+ rctl |= E1000_RCTL_DTYP_PS;
+
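+		/* BSIZE0 (the header buffer) is expressed in 128-byte
+		 * increments, hence the right shift
+		 */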
+ psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
+
+ switch (adapter->rx_ps_pages) {
+ case 3:
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
+ /* fall-through */
+ case 2:
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
+ /* fall-through */
+ case 1:
+ psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
+ break;
+ }
+
+ ew32(PSRCTL, psrctl);
+ }
+
+ /* This is useful for sniffing bad packets. */
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+ * in e1000e_set_rx_mode
+ */
+ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+ E1000_RCTL_BAM | /* RX All Bcast Pkts */
+ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+ E1000_RCTL_DPF | /* Allow filtered pause */
+ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+ /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+ * and that breaks VLANs.
+ */
+ }
+
+ ew32(RCTL, rctl);
+ /* just started the receive unit, no need to restart */
+ adapter->flags &= ~FLAG_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ u64 rdba;
+ u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+ if (adapter->rx_ps_pages) {
+ /* this is a 32 byte descriptor */
+ rdlen = rx_ring->count *
+ sizeof(union e1000_rx_desc_packet_split);
+ adapter->clean_rx = e1000_clean_rx_irq_ps;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+ } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+ } else {
+ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+ }
+
+ /* disable receives while setting up the descriptors */
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ /* set the writeback threshold (only takes effect if the RDTR
+ * is set). set GRAN=1 and write back up to 0x4 worth, and
+ * enable prefetching of 0x20 Rx descriptors
+ * granularity = 01
+ * wthresh = 04,
+ * hthresh = 04,
+ * pthresh = 0x20
+ */
+ ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
+ ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
+
+ /* override the delay timers for enabling bursting, only if
+ * the value was not set by the user via module options
+ */
+ if (adapter->rx_int_delay == DEFAULT_RDTR)
+ adapter->rx_int_delay = BURST_RDTR;
+ if (adapter->rx_abs_int_delay == DEFAULT_RADV)
+ adapter->rx_abs_int_delay = BURST_RADV;
+ }
+
+ /* set the Receive Delay Timer Register */
+ ew32(RDTR, adapter->rx_int_delay);
+
+ /* irq moderation */
+ ew32(RADV, adapter->rx_abs_int_delay);
+ if ((adapter->itr_setting != 0) && (adapter->itr != 0))
+ e1000e_write_itr(adapter, adapter->itr);
+
+ ctrl_ext = er32(CTRL_EXT);
+ /* Auto-Mask interrupts upon ICR access */
+ ctrl_ext |= E1000_CTRL_EXT_IAME;
+ ew32(IAM, 0xffffffff);
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ rdba = rx_ring->dma;
+ ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
+ ew32(RDBAH(0), (rdba >> 32));
+ ew32(RDLEN(0), rdlen);
+ ew32(RDH(0), 0);
+ ew32(RDT(0), 0);
+ rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
+ rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
+
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ rxcsum = er32(RXCSUM);
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
+ rxcsum |= E1000_RXCSUM_TUOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ ew32(RXCSUM, rxcsum);
+
+ /* With jumbo frames, excessive C-state transition latencies result
+ * in dropped transactions.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
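+		/* lat is roughly the time in usec, at gigabit line rate, to
+		 * fill the Rx FIFO headroom (Rx allocation minus one max
+		 * frame): bytes * 8 bits / 1000 bits-per-usec
+		 */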
+ u32 lat =
+ ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
+ adapter->max_frame_size) * 8 / 1000;
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ u32 rxdctl = er32(RXDCTL(0));
+
+ ew32(RXDCTL(0), rxdctl | 0x3);
+ }
+
+ pm_qos_update_request(&adapter->pm_qos_req, lat);
+ } else {
+ pm_qos_update_request(&adapter->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+ }
+
+ /* Enable Receives */
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* update_mc_addr_list expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int rar_entries;
+ int count = 0;
+
+ rar_entries = hw->mac.ops.rar_get_count(hw);
+
+ /* save a rar entry for our hardware address */
+ rar_entries--;
+
+ /* save a rar entry for the LAA workaround */
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+ rar_entries--;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+
+ /* write the addresses in reverse order to avoid write
+ * combining
+ */
+ netdev_for_each_uc_addr(ha, netdev) {
+ int rval;
+
+ if (!rar_entries)
+ break;
+ rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (rval < 0)
+ return -ENOMEM;
+ count++;
+ }
+ }
+
+ /* zero out the remaining RAR entries not used above */
+ for (; rar_entries > 0; rar_entries--) {
+ ew32(RAH(rar_entries), 0);
+ ew32(RAL(rar_entries), 0);
+ }
+ e1e_flush();
+
+ return count;
+}
+
+/**
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000e_set_rx_mode(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (pm_runtime_suspended(netdev->dev.parent))
+ return;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = er32(RCTL);
+
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ /* Do not hardware filter VLANs in promisc mode */
+ e1000e_vlan_filter_disable(adapter);
+ } else {
+ int count;
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = e1000e_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_MPE;
+ }
+ e1000e_vlan_filter_enable(adapter);
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = e1000e_write_uc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_UPE;
+ }
+
+ ew32(RCTL, rctl);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ e1000e_vlan_strip_enable(adapter);
+ else
+ e1000e_vlan_strip_disable(adapter);
+}
+
+static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc, rxcsum;
+ u32 rss_key[10];
+ int i;
+
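+	/* seed RSSRK from the kernel's randomly generated global RSS key */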
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ for (i = 0; i < 10; i++)
+ ew32(RSSRK(i), rss_key[i]);
+
+ /* Direct all traffic to queue 0 */
+ for (i = 0; i < 32; i++)
+ ew32(RETA(i), 0);
+
+ /* Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback.
+ */
+ rxcsum = er32(RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ ew32(RXCSUM, rxcsum);
+
+ mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+ ew32(MRQC, mrqc);
+}
+
+/**
+ * e1000e_get_base_timinca - get default SYSTIM time increment attributes
+ * @adapter: board private structure
+ * @timinca: pointer to returned time increment attributes
+ *
+ * Get attributes for incrementing the System Time Register SYSTIML/H at
+ * the default base frequency, and set the cyclecounter shift value.
+ **/
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 incvalue, incperiod, shift;
+
+ /* Make sure clock is enabled on I217/I218/I219 before checking
+ * the frequency
+ */
+ if (((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) &&
+ !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
+ u32 fextnvm7 = er32(FEXTNVM7);
+
+ if (!(fextnvm7 & (1 << 0))) {
+ ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ e1e_flush();
+ }
+ }
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ /* On I217, I218 and I219, the clock frequency is 25MHz
+ * or 96MHz as indicated by the System Clock Frequency
+ * Indication
+ */
+ if (((hw->mac.type != e1000_pch_lpt) &&
+ (hw->mac.type != e1000_pch_spt)) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHz;
+ incvalue = INCVALUE_96MHz;
+ shift = INCVALUE_SHIFT_96MHz;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ break;
+ default:
+ return -EINVAL;
+ }
+
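+	/* TIMINCA holds the increment period in its upper field and the
+	 * shifted increment value in its lower field; SYSTIM advances by
+	 * incvalue once every incperiod clock cycles
+	 */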
+ *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
+ ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
+
+ return 0;
+}
+
+/**
+ * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
+ * @adapter: board private structure
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of level 2 or 4".
+ **/
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
+ struct hwtstamp_config *config)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 rxmtrl = 0;
+ u16 rxudp = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return -EINVAL;
+
+ /* flags reserved for future extensions - must be zero */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ /* Also time stamps V2 L2 Path Delay Request/Response */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /* Also time stamps V2 L2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* Hardware cannot filter just V2 L4 Sync messages;
+ * fall-through to V2 (both L2 and L4) Sync.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* Hardware cannot filter just V2 L4 Delay Request messages;
+ * fall-through to V2 (both L2 and L4) Delay Request.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ /* Hardware cannot filter just V2 L4 or L2 Event messages;
+ * fall-through to all V2 (both L2 and L4) Events.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* For V1, the hardware can only filter Sync messages or
+ * Delay Request messages but not both so fall-through to
+ * time stamp all packets.
+ */
+ case HWTSTAMP_FILTER_ALL:
+ is_l2 = true;
+ is_l4 = true;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ adapter->hwtstamp_config = *config;
+
+ /* enable/disable Tx h/w time stamping */
+ regval = er32(TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ ew32(TSYNCTXCTL, regval);
+ if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
+ (regval & E1000_TSYNCTXCTL_ENABLED)) {
+ e_err("Timesync Tx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* enable/disable Rx h/w time stamping */
+ regval = er32(TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ ew32(TSYNCRXCTL, regval);
+ if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK)) !=
+ (regval & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK))) {
+ e_err("Timesync Rx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* L2: define ethertype filter for time stamped packets */
+ if (is_l2)
+ rxmtrl |= ETH_P_1588;
+
+ /* define which PTP packets get time stamped */
+ ew32(RXMTRL, rxmtrl);
+
+ /* Filter by destination port */
+ if (is_l4) {
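+		/* PTP event messages use UDP port 319 (PTP_EV_PORT) */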
+ rxudp = PTP_EV_PORT;
+ cpu_to_be16s(&rxudp);
+ }
+ ew32(RXUDP, rxudp);
+
+ e1e_flush();
+
+ /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
+ er32(RXSTMPH);
+ er32(TXSTMPH);
+
+ /* Get and set the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &regval);
+ if (ret_val)
+ return ret_val;
+ ew32(TIMINCA, regval);
+
+ /* reset the ns time counter */
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+/**
+ * e1000_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+
+ e1000e_set_rx_mode(adapter->netdev);
+
+ e1000_restore_vlan(adapter);
+ e1000_init_manageability_pt(adapter);
+
+ e1000_configure_tx(adapter);
+
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ e1000e_setup_rss_hash(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
+}
+
+/**
+ * e1000e_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000e_reset ***
+ **/
+void e1000e_power_up_phy(struct e1000_adapter *adapter)
+{
+ if (adapter->hw.phy.ops.power_up)
+ adapter->hw.phy.ops.power_up(&adapter->hw);
+
+ adapter->hw.mac.ops.setup_link(&adapter->hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down the PHY
+ *
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
+ */
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ if (adapter->hw.phy.ops.power_down)
+ adapter->hw.phy.ops.power_down(&adapter->hw);
+}
+
+/**
+ * e1000e_reset - bring the hardware into a known good state
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+void e1000e_reset(struct e1000_adapter *adapter)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_fc_info *fc = &adapter->hw.fc;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tx_space, min_tx_space, min_rx_space;
+ u32 pba = adapter->pba;
+ u16 hwm;
+
+ /* reset Packet Buffer Allocation to default */
+ ew32(PBA, pba);
+
+ if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB.
+ */
+ pba = er32(PBA);
+		/* upper 16 bits hold the Tx packet buffer allocation in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits hold the Rx packet buffer allocation in KB */
+		pba &= 0xffff;
+		/* the Tx fifo also stores 16 bytes of information about each
+		 * Tx packet, but don't include the ethernet FCS because
+		 * hardware appends it
+		 */
+ min_tx_space = (adapter->max_frame_size +
+ sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = adapter->max_frame_size;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
+
+ /* If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation
+ */
+ if ((tx_space < min_tx_space) &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba -= min_tx_space - tx_space;
+
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment
+ */
+ if (pba < min_rx_space)
+ pba = min_rx_space;
+ }
+
+ ew32(PBA, pba);
+ }
+
+ /* flow control settings
+ *
+ * The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, and
+ * - the full Rx FIFO size minus one full frame
+ */
+ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+ fc->pause_time = 0xFFFF;
+ else
+ fc->pause_time = E1000_FC_PAUSE_TIME;
+ fc->send_xon = true;
+ fc->current_mode = fc->requested_mode;
+
+ switch (hw->mac.type) {
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ pba = 14;
+ ew32(PBA, pba);
+ fc->high_water = 0x2800;
+ fc->low_water = fc->high_water - 8;
+ break;
+ }
+ /* fall-through */
+ default:
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - adapter->max_frame_size));
+
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+ fc->low_water = fc->high_water - 8;
+ break;
+ case e1000_pchlan:
+ /* Workaround PCH LOM adapter hangs with certain network
+ * loads. If hangs persist, try disabling Tx flow control.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ fc->high_water = 0x3500;
+ fc->low_water = 0x1500;
+ } else {
+ fc->high_water = 0x5000;
+ fc->low_water = 0x3000;
+ }
+ fc->refresh_time = 0x1000;
+ break;
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ fc->refresh_time = 0x0400;
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN) {
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ break;
+ }
+
+ pba = 14;
+ ew32(PBA, pba);
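+		/* with jumbo frames: high water at 90% and low water at 80%
+		 * of the 14KB Rx packet buffer allocation
+		 */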
+ fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
+ fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
+ break;
+ }
+
+ /* Alignment of Tx data is on an arbitrary byte boundary with the
+ * maximum size per Tx descriptor limited only to the transmit
+ * allocation of the packet buffer minus 96 bytes with an upper
+ * limit of 24KB due to receive synchronization limitations.
+ */
+ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+ 24 << 10);
+
+ /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ * fit in receive buffer.
+ */
+ if (adapter->itr_setting & 0x3) {
+ if ((adapter->max_frame_size * 2) > (pba << 10)) {
+ if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate off\n");
+ adapter->flags2 |= FLAG2_DISABLE_AIM;
+ e1000e_write_itr(adapter, 0);
+ }
+ } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate on\n");
+ adapter->flags2 &= ~FLAG2_DISABLE_AIM;
+ adapter->itr = 20000;
+ e1000e_write_itr(adapter, adapter->itr);
+ }
+ }
+
+ /* Allow time for pending master requests to run */
+ mac->ops.reset_hw(hw);
+
+ /* For parts with AMT enabled, let the firmware know
+ * that the network interface is in control
+ */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ ew32(WUC, 0);
+
+ if (mac->ops.init_hw(hw))
+ e_err("Hardware Error\n");
+
+ e1000_update_mng_vlan(adapter);
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ ew32(VET, ETH_P_8021Q);
+
+ e1000e_reset_adaptive(hw);
+
+ /* initialize systim and reset the ns time counter */
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+
+ /* Set EEE advertisement as appropriate */
+ if (adapter->flags2 & FLAG2_HAS_EEE) {
+ s32 ret_val;
+ u16 adv_addr;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "Invalid PHY type setting EEE advertisement\n");
+ return;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ dev_err(&adapter->pdev->dev,
+ "EEE advertisement - unable to acquire PHY\n");
+ return;
+ }
+
+ e1000_write_emi_reg_locked(hw, adv_addr,
+ hw->dev_spec.ich8lan.eee_disable ?
+ 0 : adapter->eee_advert);
+
+ hw->phy.ops.release(hw);
+ }
+
+ if (!netif_running(adapter->netdev) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000_power_down_phy(adapter);
+
+ e1000_get_phy_info(hw);
+
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+ !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+ u16 phy_data = 0;
+		/* speed up time to link by disabling smart power down; ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed
+		 */
+ e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ }
+}
+
+int e1000e_up(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ e1000_configure(adapter);
+
+ clear_bit(__E1000_DOWN, &adapter->state);
+
+ if (adapter->msix_entries)
+ e1000_configure_msix(adapter);
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(adapter->netdev);
+
+ /* fire a link change interrupt to start the watchdog */
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
+ return 0;
+}
+
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+
+ /* due to rare timing issues, write to TIDV/RDTR again to ensure the
+ * write is successful
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
+/**
+ * e1000e_down - quiesce the device and optionally reset the hardware
+ * @adapter: board private structure
+ * @reset: boolean flag to reset the hardware or not
+ */
+void e1000e_down(struct e1000_adapter *adapter, bool reset)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer
+ */
+ set_bit(__E1000_DOWN, &adapter->state);
+
+ netif_carrier_off(netdev);
+
+ /* disable receives in the hardware */
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ /* flush both disables and wait for them to finish */
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ e1000_irq_disable(adapter);
+
+ napi_synchronize(&adapter->napi);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ e1000e_flush_descriptors(adapter);
+ e1000_clean_tx_ring(adapter->tx_ring);
+ e1000_clean_rx_ring(adapter->rx_ring);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ /* Disable Si errata workaround on PCHx for jumbo frame flow */
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, false))
+ e_dbg("failed to disable jumbo frame workaround mode\n");
+
+ if (reset && !pci_channel_offline(adapter->pdev))
+ e1000e_reset(adapter);
+}
+
+void e1000e_reinit_locked(struct e1000_adapter *adapter)
+{
+ might_sleep();
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ e1000e_down(adapter, true);
+ e1000e_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+ struct e1000_hw *hw = &adapter->hw;
+ cycle_t systim, systim_next;
+
+ /* latch SYSTIMH on read of SYSTIML */
+ systim = (cycle_t)er32(SYSTIML);
+ systim |= (cycle_t)er32(SYSTIMH) << 32;
+
+ if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
+ u64 incvalue, time_delta, rem, temp;
+ int i;
+
+ /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue
+ */
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
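+			/* do_div() divides temp in place, returns remainder */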
+ rem = do_div(temp, incvalue);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
+ (rem == 0))
+ break;
+ }
+ }
+ return systim;
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_ps_bsize0 = 128;
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ adapter->tx_ring_count = E1000_DEFAULT_TXD;
+ adapter->rx_ring_count = E1000_DEFAULT_RXD;
+
+ spin_lock_init(&adapter->stats64_lock);
+
+ e1000e_set_interrupt_capability(adapter);
+
+ if (e1000_alloc_queues(adapter))
+ return -ENOMEM;
+
+ /* Setup hardware time stamping cyclecounter */
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ adapter->cc.read = e1000e_cyclecounter_read;
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
+ adapter->cc.mult = 1;
+		/* cc.shift set in e1000e_get_base_timinca() */
+
+ spin_lock_init(&adapter->systim_lock);
+ INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable(adapter);
+
+ set_bit(__E1000_DOWN, &adapter->state);
+ return 0;
+}
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
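+	/* reading ICR acknowledges and clears the pending causes */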
+ u32 icr = er32(ICR);
+
+ e_dbg("icr is %08X\n", icr);
+ if (icr & E1000_ICR_RXSEQ) {
+ adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+ /* Force memory writes to complete before acknowledging the
+ * interrupt is handled.
+ */
+ wmb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* poll_enable hasn't been called yet, so don't need disable */
+ /* clear any pending events */
+ er32(ICR);
+
+ /* free the real vector and request a test handler */
+ e1000_free_irq(adapter);
+ e1000e_reset_interrupt_capability(adapter);
+
+ /* Assume that the test fails, if it succeeds then the test
+ * MSI irq handler will unset this flag
+ */
+ adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err)
+ goto msi_test_failed;
+
+ err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
+ netdev->name, netdev);
+ if (err) {
+ pci_disable_msi(adapter->pdev);
+ goto msi_test_failed;
+ }
+
+ /* Force memory writes to complete before enabling and firing an
+ * interrupt.
+ */
+ wmb();
+
+ e1000_irq_enable(adapter);
+
+ /* fire an unusual interrupt on the test handler */
+ ew32(ICS, E1000_ICS_RXSEQ);
+ e1e_flush();
+ msleep(100);
+
+ e1000_irq_disable(adapter);
+
+ rmb(); /* read flags after interrupt has been fired */
+
+ if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e_info("MSI interrupt test failed, using legacy interrupt.\n");
+ } else {
+ e_dbg("MSI interrupt test succeeded!\n");
+ }
+
+ free_irq(adapter->pdev->irq, netdev);
+ pci_disable_msi(adapter->pdev);
+
+msi_test_failed:
+ e1000e_set_interrupt_capability(adapter);
+ return e1000_request_irq(adapter);
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+ int err;
+ u16 pci_cmd;
+
+ if (!(adapter->flags & FLAG_MSI_ENABLED))
+ return 0;
+
+ /* disable SERR in case the MSI write causes a master abort */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_SERR)
+ pci_write_config_word(adapter->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = e1000_test_msi_interrupt(adapter);
+
+ /* re-enable SERR */
+ if (pci_cmd & PCI_COMMAND_SERR) {
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_SERR;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__E1000_TESTING, &adapter->state))
+ return -EBUSY;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000e_setup_tx_resources(adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = e1000e_setup_rx_resources(adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000e_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+ e1000_update_mng_vlan(adapter);
+
+	/* DMA latency requirement to work around jumbo frame issue */
+ pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ /* before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to setup our
+ * clean_rx handler before we do so.
+ */
+ e1000_configure(adapter);
+
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* Work around PCIe errata with MSI interrupts causing some chipsets to
+ * ignore e1000e MSI messages, which means we need to test our MSI
+ * interrupt now
+ */
+ if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
+ err = e1000_test_msi(adapter);
+ if (err) {
+ e_err("Interrupt allocation failed\n");
+ goto err_req_irq;
+ }
+ }
+
+ /* From here on the code is the same as e1000e_up() */
+ clear_bit(__E1000_DOWN, &adapter->state);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ adapter->tx_hang_recheck = false;
+ netif_start_queue(netdev);
+
+ hw->mac.get_link_status = true;
+ pm_runtime_put(&pdev->dev);
+
+ /* fire a link status change interrupt to start the watchdog */
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
+ return 0;
+
+err_req_irq:
+ e1000e_release_hw_control(adapter);
+ e1000_power_down_phy(adapter);
+ e1000e_free_rx_resources(adapter->rx_ring);
+err_setup_rx:
+ e1000e_free_tx_resources(adapter->tx_ring);
+err_setup_tx:
+ e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ int count = E1000_CHECK_RESET_COUNT;
+
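+	/* wait for any in-progress reset to finish (10-20ms per poll) */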
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+ usleep_range(10000, 20000);
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter, true);
+ e1000_free_irq(adapter);
+
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ }
+
+ napi_disable(&adapter->napi);
+
+ e1000e_free_tx_resources(adapter->tx_ring);
+ e1000e_free_rx_resources(adapter->rx_ring);
+
+ /* kill manageability vlan ID if supported, but not if a vlan with
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ adapter->mng_vlan_id);
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now closed
+ */
+ if ((adapter->flags & FLAG_HAS_AMT) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000e_release_hw_control(adapter);
+
+ pm_qos_remove_request(&adapter->pm_qos_req);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
+		/* activate the workaround */
+ e1000e_set_laa_state_82571(&adapter->hw, 1);
+
+		/* Hold a copy of the LAA in RAR[14]. This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
+		 * of the RARs and no incoming packets directed to this port
+		 * are dropped. Eventually the LAA will be in both RAR[0]
+		 * and RAR[14].
+		 */
+ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
+ adapter->hw.mac.rar_entry_count - 1);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because reading the phy requires
+ * acquiring a semaphore, which may msleep while waiting, and
+ * we can't msleep in a timer.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ update_phy_task);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1000_get_phy_info(hw);
+
+ /* Enable EEE on 82579 after link up */
+ if (hw->phy.type >= e1000_phy_82579)
+ e1000_set_eee_pchlan(hw);
+}
+
+/**
+ * e1000_update_phy_info - timer call-back to update PHY info
+ * @data: pointer to adapter cast into an unsigned long
+ *
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ **/
+static void e1000_update_phy_info(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ schedule_work(&adapter->update_phy_task);
+}
+
+/**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ *
+ * Read/clear the upper 16-bit PHY registers and read/accumulate the
+ * lower 16-bit PHY registers
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val;
+ u16 phy_data;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ /* A page set is expensive so check if already on desired page.
+ * If not, set to the page with the PHY status registers.
+ */
+ hw->phy.addr = 1;
+ ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ &phy_data);
+ if (ret_val)
+ goto release;
+ if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Single Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.scc += phy_data;
+
+ /* Excessive Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.ecol += phy_data;
+
+ /* Multiple Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.mcc += phy_data;
+
+ /* Late Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.latecol += phy_data;
+
+ /* Collision Count - also used for adaptive IFS */
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ if (!ret_val)
+ hw->mac.collision_delta = phy_data;
+
+ /* Defer Count */
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.dc += phy_data;
+
+ /* Transmit with no CRS */
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.tncrs += phy_data;
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000e_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_stats(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorc += er32(GORCL);
+ er32(GORCH); /* Clear gorc */
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ adapter->stats.mpc += er32(MPC);
+
+ /* Half-duplex statistics */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+ e1000e_update_phy_stats(adapter);
+ } else {
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+
+ hw->mac.collision_delta = er32(COLC);
+
+ if ((hw->mac.type != e1000_82574) &&
+ (hw->mac.type != e1000_82583))
+ adapter->stats.tncrs += er32(TNCRS);
+ }
+ adapter->stats.colc += hw->mac.collision_delta;
+ }
+
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotc += er32(GOTCL);
+ er32(GOTCH); /* Clear gotc */
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->mac.tx_packet_delta = er32(TPT);
+ adapter->stats.tpt += hw->mac.tx_packet_delta;
+
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
+
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = adapter->stats.mprc;
+ netdev->stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ netdev->stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
+ netdev->stats.rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+ netdev->stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
+ netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+ netdev->stats.tx_window_errors = adapter->stats.latecol;
+ netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Management Stats */
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
+
+ /* Correctable ECC Errors */
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ }
+}
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_phy_regs *phy = &adapter->phy_regs;
+
+ if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
+ (er32(STATUS) & E1000_STATUS_LU) &&
+ (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
+ ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
+ ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
+ ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
+ ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
+ ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
+ ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
+ ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
+ if (ret_val)
+ e_warn("Error reading PHY register\n");
+ } else {
+ /* Do not read PHY registers if link is not up
+ * Set values to typical power-on defaults
+ */
+ phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+ phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+ BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+ BMSR_ERCAP);
+ phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ phy->lpa = 0;
+ phy->expansion = EXPANSION_ENABLENPAGE;
+ phy->ctrl1000 = ADVERTISE_1000FULL;
+ phy->stat1000 = 0;
+ phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+ }
+}
+
+static void e1000_print_link_info(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl = er32(CTRL);
+
+ /* Link status message must follow this format for user tools */
+ pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name, adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+}
+
+static bool e1000e_has_link(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = false;
+ s32 ret_val = 0;
+
+	/* get_link_status is set on an LSC (link status change) or Rx
+	 * sequence error interrupt; it stays true until check_for_link
+	 * establishes link, and applies to copper adapters ONLY
+	 */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ if (hw->mac.get_link_status) {
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ } else {
+ link_active = true;
+ }
+ break;
+ case e1000_media_type_fiber:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+ break;
+ case e1000_media_type_internal_serdes:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = adapter->hw.mac.serdes_has_link;
+ break;
+ default:
+ case e1000_media_type_unknown:
+ break;
+ }
+
+ if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+ e_info("Gigabit has been disabled, downgrading speed\n");
+ }
+
+ return link_active;
+}
+
+static void e1000e_enable_receives(struct e1000_adapter *adapter)
+{
+ /* make sure the receive unit is started */
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RESTART_NOW)) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl = er32(RCTL);
+
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ adapter->flags &= ~FLAG_RESTART_NOW;
+ }
+}
+
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+	/* With 82574 controllers, the PHY needs to be checked periodically
+	 * for a hung state and reset if two successive checks return true
+	 */
+ if (e1000_check_phy_82574(hw))
+ adapter->phy_hang_count++;
+ else
+ adapter->phy_hang_count = 0;
+
+ if (adapter->phy_hang_count > 1) {
+ adapter->phy_hang_count = 0;
+ e_dbg("PHY appears hung - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_phy_info *phy = &adapter->hw.phy;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 link, tctl;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ link = e1000e_has_link(adapter);
+ if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ e1000e_enable_receives(adapter);
+ goto link_up;
+ }
+
+ if ((e1000e_enable_tx_pkt_filtering(hw)) &&
+ (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
+ e1000_update_mng_vlan(adapter);
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ bool txb2b = true;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ /* update snapshot of PHY registers on LSC */
+ e1000_phy_read_status(adapter);
+ mac->ops.get_link_up_info(&adapter->hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+ e1000_print_link_info(adapter);
+
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
+
+ /* On supported PHYs, check for duplex mismatch only
+ * if link has autonegotiated at 10/100 half
+ */
+ if ((hw->phy.type == e1000_phy_igp_3 ||
+ hw->phy.type == e1000_phy_bm) &&
+ hw->mac.autoneg &&
+ (adapter->link_speed == SPEED_10 ||
+ adapter->link_speed == SPEED_100) &&
+ (adapter->link_duplex == HALF_DUPLEX)) {
+ u16 autoneg_exp;
+
+ e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
+
+ if (!(autoneg_exp & EXPANSION_NWAY))
+ e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
+ }
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txb2b = false;
+ adapter->tx_timeout_factor = 16;
+ break;
+ case SPEED_100:
+ txb2b = false;
+ adapter->tx_timeout_factor = 10;
+ break;
+ }
+
+ /* workaround: re-program speed mode bit after
+ * link-up event
+ */
+ if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
+ !txb2b) {
+ u32 tarc0;
+
+ tarc0 = er32(TARC(0));
+ tarc0 &= ~SPEED_MODE_BIT;
+ ew32(TARC(0), tarc0);
+ }
+
+ /* disable TSO for pcie and 10/100 speeds, to avoid
+ * some hardware issues
+ */
+ if (!(adapter->flags & FLAG_TSO_FORCE)) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ e_info("10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* unexpected speed; leave TSO settings unchanged */
+ break;
+ }
+ }
+
+ /* enable transmits in the hardware, need to do this
+ * after setting TARC(0)
+ */
+ tctl = er32(TCTL);
+ tctl |= E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ /* Perform any post-link-up configuration before
+ * reporting link up.
+ */
+ if (phy->ops.cfg_on_link_up)
+ phy->ops.cfg_on_link_up(hw);
+
+ netif_carrier_on(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netif_carrier_off(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+
+ /* 80003ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
+ */
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ adapter->flags |= FLAG_RESTART_NOW;
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
+ }
+ }
+
+link_up:
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+
+ mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+ adapter->gorc_old = adapter->stats.gorc;
+ adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+ adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
+
+ /* If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
+ */
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
+
+ /* If reset is necessary, do it outside of interrupt context. */
+ if (adapter->flags & FLAG_RESTART_NOW) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+
+ e1000e_update_adaptive(&adapter->hw);
+
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
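+ /* The formula below maps dif == 0 (fully symmetric) to
+ * itr = 2000 and dif == goc (fully one-sided) to 8000.
+ */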
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ e1000e_write_itr(adapter, itr);
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->msix_entries)
+ ew32(ICS, adapter->rx_ring->ims_val);
+ else
+ ew32(ICS, E1000_ICS_RXDMT0);
+
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* With 82571 controllers, LAA may be overwritten due to controller
+ * reset from the other port. Set the appropriate LAA in RAR[0]
+ */
+ if (e1000e_get_laa_state_82571(hw))
+ hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
+
+ if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+ e1000e_check_82574_phy_workaround(adapter);
+
+ /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
+ if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
+ er32(RXSTMPH);
+ adapter->rx_hwtstamp_cleared++;
+ } else {
+ adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
+ }
+ }
+
+ /* Reset the timer */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+}
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_NO_FCS 0x00000010
+#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
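+/**
+ * e1000_tso - set up a TSO context descriptor
+ * @tx_ring: Tx descriptor ring
+ * @skb: GSO socket buffer
+ * @protocol: network-layer protocol of the packet
+ *
+ * Returns 1 if a context descriptor was queued, 0 if the skb does not
+ * need TSO, or a negative error code from skb_cow_head().
+ **/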
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u32 cmd_length = 0;
+ u16 ipcse = 0, mss;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ if (protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ cmd_length = E1000_TXD_CMD_IP;
+ ipcse = skb_transport_offset(skb) - 1;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcse = 0;
+ }
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return 1;
+}
+
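+/**
+ * e1000_tx_csum - set up a checksum-offload context descriptor
+ * @tx_ring: Tx descriptor ring
+ * @skb: socket buffer to offload
+ * @protocol: network-layer protocol of the packet
+ *
+ * Returns true if a context descriptor was queued, false if the skb
+ * does not use CHECKSUM_PARTIAL.
+ **/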
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ e_warn("checksum_partial proto=%x!\n",
+ be16_to_cpu(protocol));
+ break;
+ }
+
+ css = skb_checksum_start_offset(skb);
+
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
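+/**
+ * e1000_tx_map - map skb data for DMA and record it in buffer_info
+ * @tx_ring: Tx descriptor ring
+ * @skb: socket buffer to map
+ * @first: ring index of the first descriptor used by this skb
+ * @max_per_txd: maximum bytes described by a single descriptor
+ * @nr_frags: number of page fragments in the skb
+ *
+ * Returns the number of descriptors consumed, or 0 if a DMA mapping
+ * failed (any mappings already made are unwound).
+ **/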
+static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ unsigned int first, unsigned int max_per_txd,
+ unsigned int nr_frags)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ buffer_info->mapped_as_page = false;
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ len -= size;
+ offset += size;
+ count++;
+
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = skb_frag_size(frag);
+ offset = 0;
+
+ while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size,
+ DMA_TO_DEVICE);
+ buffer_info->mapped_as_page = true;
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ len -= size;
+ offset += size;
+ count++;
+ }
+ }
+
+ segs = skb_shinfo(skb)->gso_segs ? : 1;
+ /* account for the headers replicated in each additional TSO segment */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "Tx DMA map failed\n");
+ buffer_info->dma = 0;
+ if (count)
+ count--;
+
+ while (count--) {
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_put_txbuf(tx_ring, buffer_info);
+ }
+
+ return 0;
+}
+
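+/**
+ * e1000_tx_queue - write the Tx descriptors for a mapped skb
+ * @tx_ring: Tx descriptor ring
+ * @tx_flags: E1000_TX_FLAGS_* bits describing the offloads in use
+ * @count: number of descriptors to write
+ **/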
+static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if (tx_flags & E1000_TX_FLAGS_TSO) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+ if (tx_flags & E1000_TX_FLAGS_IPV4)
+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ if (tx_flags & E1000_TX_FLAGS_CSUM) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if (tx_flags & E1000_TX_FLAGS_VLAN) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
+ txd_lower &= ~(E1000_TXD_CMD_IFCS);
+
+ if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
+ }
+
+ i = tx_ring->next_to_use;
+
+ do {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data = cpu_to_le32(txd_lower |
+ buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ } while (--count > 0);
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
+ tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+}
+
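+/* 282 = 14-byte Ethernet + 20-byte IP + 8-byte UDP header plus the
+ * 240-byte minimal BOOTP/DHCP payload (236 fixed bytes + 4-byte cookie)
+ */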
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 length, offset;
+
+ if (skb_vlan_tag_present(skb) &&
+ !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
+
+ if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
+ return 0;
+
+ if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
+ return 0;
+
+ {
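+ /* the IPv4 header follows the 14-byte (ETH_HLEN) Ethernet header;
+ * the length check above guarantees it is present
+ */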
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
+ struct udphdr *udp;
+
+ if (ip->protocol != IPPROTO_UDP)
+ return 0;
+
+ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
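+ /* destination port 67 is the BOOTP/DHCP server port */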
+ if (ntohs(udp->dest) != 67)
+ return 0;
+
+ offset = (u8 *)udp + 8 - skb->data;
+ length = skb->len - offset;
+ return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
+ }
+}
+
+static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
+{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+
+ netif_stop_queue(adapter->netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (e1000_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(adapter->netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
+{
+ BUG_ON(size > tx_ring->count);
+
+ if (e1000_desc_unused(tx_ring) >= size)
+ return 0;
+ return __e1000_maybe_stop_tx(tx_ring, size);
+}
+
+static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb_headlen(skb);
+ unsigned int nr_frags;
+ unsigned int mss;
+ int count = 0;
+ int tso;
+ unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
+
+ if (test_bit(__E1000_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* The minimum packet size with TCTL.PSP set is 17 bytes so
+ * pad skb in order to meet this minimum size requirement
+ */
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ u8 hdr_len;
+
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data
+ */
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ /* we do this workaround for ES2LAN, but it is unnecessary;
+ * avoiding it could save a lot of cycles
+ */
+ if (skb->data_len && (hdr_len == len)) {
+ unsigned int pull_size;
+
+ pull_size = min_t(unsigned int, 4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ e_err("__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ len = skb_headlen(skb);
+ }
+ }
+
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+ count++;
+ count++;
+
+ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+ adapter->tx_fifo_limit);
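+ /* count is now the worst case: one data descriptor per
+ * tx_fifo_limit-sized chunk of the head and of each frag, plus the
+ * context descriptor and one spare reserved above
+ */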
+
+ if (adapter->hw.mac.tx_pkt_filtering)
+ e1000_transfer_dhcp_info(adapter, skb);
+
+ /* need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time
+ */
+ if (e1000_maybe_stop_tx(tx_ring, count + 2))
+ return NETDEV_TX_BUSY;
+
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = tx_ring->next_to_use;
+
+ tso = e1000_tso(tx_ring, skb, protocol);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ else if (e1000_tx_csum(tx_ring, skb, protocol))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+ /* The old method assumed an IPv4 packet by default whenever TSO was
+ * enabled. 82571 hardware supports TSO for IPv6 as well, so we can
+ * no longer make that assumption.
+ */
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ if (unlikely(skb->no_fcs))
+ tx_flags |= E1000_TX_FLAGS_NO_FCS;
+
+ /* if count is 0 then mapping error has occurred */
+ count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+ nr_frags);
+ if (count) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
+ !adapter->tx_hwtstamp_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
+ adapter->tx_hwtstamp_skb = skb_get(skb);
+ adapter->tx_hwtstamp_start = jiffies;
+ schedule_work(&adapter->tx_hwtstamp_work);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+
+ netdev_sent_queue(netdev, skb->len);
+ e1000_tx_queue(tx_ring, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(tx_ring,
+ (MAX_SKB_FRAGS *
+ DIV_ROUND_UP(PAGE_SIZE,
+ adapter->tx_fifo_limit) + 2));
+
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(tx_ring,
+ tx_ring->next_to_use);
+ else
+ writel(tx_ring->next_to_use, tx_ring->tail);
+
+ /* we need this if more than one processor can write
+ * to our tail at a time; it synchronizes IO on
+ * IA64/Altix systems
+ */
+ mmiowb();
+ }
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter;
+ adapter = container_of(work, struct e1000_adapter, reset_task);
+
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ if (!(adapter->flags & FLAG_RESTART_NOW)) {
+ e1000e_dump(adapter);
+ e_err("Reset adapter unexpectedly\n");
+ }
+ e1000e_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
+ *
+ * Returns the address of the device statistics structure.
+ **/
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Jumbo frame support */
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Supported frame sizes */
+ if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+ (max_frame > adapter->max_hw_frame_size)) {
+ e_err("Unsupported MTU setting\n");
+ return -EINVAL;
+ }
+
+ /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+ if ((adapter->hw.mac.type >= e1000_pch2lan) &&
+ !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+ (new_mtu > ETH_DATA_LEN)) {
+ e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
+ adapter->max_frame_size = max_frame;
+ e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ if (netif_running(netdev))
+ e1000e_down(adapter, true);
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size.
+ * i.e. RXBUFFER_2048 --> size-4096 slab
+ * However with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
+
+ if (max_frame <= 2048)
+ adapter->rx_buffer_len = 2048;
+ else
+ adapter->rx_buffer_len = 4096;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
+ + ETH_FCS_LEN;
+
+ if (netif_running(netdev))
+ e1000e_up(adapter);
+ else
+ e1000e_reset(adapter);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (adapter->hw.phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->hw.phy.addr;
+ break;
+ case SIOCGMIIREG:
+ e1000_phy_read_status(adapter);
+
+ switch (data->reg_num & 0x1F) {
+ case MII_BMCR:
+ data->val_out = adapter->phy_regs.bmcr;
+ break;
+ case MII_BMSR:
+ data->val_out = adapter->phy_regs.bmsr;
+ break;
+ case MII_PHYSID1:
+ data->val_out = (adapter->hw.phy.id >> 16);
+ break;
+ case MII_PHYSID2:
+ data->val_out = (adapter->hw.phy.id & 0xFFFF);
+ break;
+ case MII_ADVERTISE:
+ data->val_out = adapter->phy_regs.advertise;
+ break;
+ case MII_LPA:
+ data->val_out = adapter->phy_regs.lpa;
+ break;
+ case MII_EXPANSION:
+ data->val_out = adapter->phy_regs.expansion;
+ break;
+ case MII_CTRL1000:
+ data->val_out = adapter->phy_regs.ctrl1000;
+ break;
+ case MII_STAT1000:
+ data->val_out = adapter->phy_regs.stat1000;
+ break;
+ case MII_ESTATUS:
+ data->val_out = adapter->phy_regs.estatus;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/**
+ * e1000e_hwtstamp_set - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: interface request
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of layer 2 or 4".
+ **/
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret_val = e1000e_config_hwtstamp(adapter, &config);
+ if (ret_val)
+ return ret_val;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* With V2 type filters which specify a Sync or Delay Request,
+ * Path Delay Request/Response messages are also time stamped
+ * by hardware so notify the caller the requested packets plus
+ * some others are time stamped.
+ */
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ default:
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ case SIOCSHWTSTAMP:
+ return e1000e_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return e1000e_hwtstamp_get(netdev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 i, mac_reg, wuc;
+ u16 phy_reg, wuc_enable;
+ int retval;
+
+ /* copy MAC RARs to PHY RARs */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ retval = hw->phy.ops.acquire(hw);
+ if (retval) {
+ e_err("Could not acquire PHY\n");
+ return retval;
+ }
+
+ /* Enable access to the wakeup registers and set page to BM_WUC_PAGE */
+ retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ goto release;
+
+ /* copy MAC MTA to PHY MTA - only needed for pchlan */
+ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+ mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+ (u16)((mac_reg >> 16) & 0xFFFF));
+ }
+
+ /* configure PHY Rx Control register */
+ hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
+ mac_reg = er32(RCTL);
+ if (mac_reg & E1000_RCTL_UPE)
+ phy_reg |= BM_RCTL_UPE;
+ if (mac_reg & E1000_RCTL_MPE)
+ phy_reg |= BM_RCTL_MPE;
+ phy_reg &= ~(BM_RCTL_MO_MASK);
+ if (mac_reg & E1000_RCTL_MO_3)
+ phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ << BM_RCTL_MO_SHIFT);
+ if (mac_reg & E1000_RCTL_BAM)
+ phy_reg |= BM_RCTL_BAM;
+ if (mac_reg & E1000_RCTL_PMCF)
+ phy_reg |= BM_RCTL_PMCF;
+ mac_reg = er32(CTRL);
+ if (mac_reg & E1000_CTRL_RFCE)
+ phy_reg |= BM_RCTL_RFCE;
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+
+ wuc = E1000_WUC_PME_EN;
+ if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
+ wuc |= E1000_WUC_APME;
+
+ /* enable PHY wakeup in MAC register */
+ ew32(WUFC, wufc);
+ ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
+ E1000_WUC_PME_STATUS | wuc));
+
+ /* configure and enable PHY wakeup in PHY registers */
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
+
+ /* activate PHY wakeup */
+ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+ retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ e_err("Could not set PHY Host Wakeup bit\n");
+release:
+ hw->phy.ops.release(hw);
+
+ return retval;
+}
+
+static void e1000e_flush_lpic(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ret_val;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto fl_out;
+
+ pr_info("EEE TX LPI TIMER: %08X\n",
+ er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
+
+ hw->phy.ops.release(hw);
+
+fl_out:
+ pm_runtime_put_sync(netdev->dev.parent);
+}
+
+static int e1000e_pm_freeze(struct device *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+ usleep_range(10000, 20000);
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ /* Quiesce the device without resetting the hardware */
+ e1000e_down(adapter, false);
+ e1000_free_irq(adapter);
+ }
+ e1000e_reset_interrupt_capability(adapter);
+
+ /* Allow time for pending master requests to run */
+ e1000e_disable_pcie_master(&adapter->hw);
+
+ return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ int retval = 0;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ e1000_setup_rctl(adapter);
+ e1000e_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_MPE;
+ ew32(RCTL, rctl);
+ }
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_ADVD3WUC;
+ if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+ ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+ adapter->hw.phy.media_type ==
+ e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ if (!runtime)
+ e1000e_power_up_phy(adapter);
+
+ if (adapter->flags & FLAG_IS_ICH)
+ e1000_suspend_workarounds_ich8lan(&adapter->hw);
+
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ /* enable wakeup by the PHY */
+ retval = e1000_init_phy_wakeup(adapter, wufc);
+ if (retval)
+ return retval;
+ } else {
+ /* enable wakeup by the MAC */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PME_EN);
+ }
+ } else {
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
+
+ e1000_power_down_phy(adapter);
+ }
+
+ if (adapter->hw.phy.type == e1000_phy_igp_3) {
+ e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ } else if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
+ if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ /* ULP does not support wake from unicast, multicast
+ * or broadcast.
+ */
+ retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+
+ if (retval)
+ return retval;
+ }
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ e1000e_release_hw_control(adapter);
+
+ pci_clear_master(pdev);
+
+ /* The pci-e switch on some quad port adapters will report a
+ * correctable error when the MAC transitions from D0 to D3. To
+ * prevent this we need to mask off the correctable errors on the
+ * downstream port of the pci-e switch.
+ *
+ * We may not have an associated upstream bridge when the PCI
+ * device is assigned to a guest; KVM on Power is one such case.
+ */
+ if (adapter->flags & FLAG_IS_QUAD_PORT) {
+ struct pci_dev *us_dev = pdev->bus->self;
+ u16 devctl;
+
+ if (!us_dev)
+ return 0;
+
+ pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
+ pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
+ (devctl & ~PCI_EXP_DEVCTL_CERE));
+
+ pci_save_state(pdev);
+ pci_prepare_to_sleep(pdev);
+
+ pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_disable_aspm - Disable ASPM states
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_dis_mask = 0;
+ u16 pdev_aspmc, parent_aspmc;
+
+ switch (state) {
+ case PCIE_LINK_STATE_L0S:
+ case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+ /* fall-through - can't have L1 without L0s */
+ case PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+ break;
+ default:
+ return;
+ }
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspmc);
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ /* Nothing to do if the ASPM states to be disabled already are */
+ if (!(pdev_aspmc & aspm_dis_mask) &&
+ (!parent || !(parent_aspmc & aspm_dis_mask)))
+ return;
+
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
+ "L0s" : "",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
+ "L1" : "");
+
+#ifdef CONFIG_PCIEASPM
+ pci_disable_link_state_locked(pdev, state);
+
+ /* Double-check ASPM control. If not disabled by the above, the
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+ * not enabled); override by writing PCI config space directly.
+ */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (!(aspm_dis_mask & pdev_aspmc))
+ return;
+#endif
+
+ /* Both device and parent should have the same ASPM setting.
+ * Disable ASPM in downstream component first and then upstream.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
+
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_dis_mask);
+}
+
+#ifdef CONFIG_PM
+static int __e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
+
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ pci_set_master(pdev);
+
+ if (hw->mac.type >= e1000_pch2lan)
+ e1000_resume_workarounds_pchlan(&adapter->hw);
+
+ e1000e_power_up_phy(adapter);
+
+ /* report the system wakeup cause from S3/S4 */
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ u16 phy_data;
+
+ e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
+ if (phy_data) {
+ e_info("PHY Wakeup cause - %s\n",
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
+ }
+ e1e_wphy(&adapter->hw, BM_WUS, ~0);
+ } else {
+ u32 wus = er32(WUS);
+
+ if (wus) {
+ e_info("MAC Wakeup cause - %s\n",
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
+ }
+ ew32(WUS, ~0);
+ }
+
+ e1000e_reset(adapter);
+
+ e1000_init_manageability_pt(adapter);
+
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000e_set_interrupt_capability(adapter);
+ if (netif_running(netdev)) {
+ u32 err = e1000_request_irq(adapter);
+
+ if (err)
+ return err;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static int e1000e_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ e1000e_flush_lpic(pdev);
+
+ e1000e_pm_freeze(dev);
+
+ return __e1000_shutdown(pdev, false);
+}
+
+static int e1000e_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;
+
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
+
+ return e1000e_pm_thaw(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int e1000e_pm_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u16 eee_lp;
+
+ eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
+
+ if (!e1000e_has_link(adapter)) {
+ adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
+ pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
+ }
+
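+ /* always report busy; suspend is scheduled explicitly above when
+ * there is no link
+ */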
+ return -EBUSY;
+}
+
+static int e1000e_pm_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (netdev->flags & IFF_UP)
+ rc = e1000e_up(adapter);
+
+ return rc;
+}
+
+static int e1000e_pm_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (netdev->flags & IFF_UP) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+ usleep_range(10000, 20000);
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ /* Down the device without resetting the hardware */
+ e1000e_down(adapter, false);
+ }
+
+ if (__e1000_shutdown(pdev, true)) {
+ e1000e_pm_runtime_resume(dev);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+ e1000e_flush_lpic(pdev);
+
+ e1000e_pm_freeze(&pdev->dev);
+
+ __e1000_shutdown(pdev, false);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->msix_entries) {
+ int vector, msix_irq;
+
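+ /* mirror the driver's MSI-X layout: vector 0 services Rx,
+ * vector 1 Tx, and vector 2 the other/link interrupt
+ */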
+ vector = 0;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_rx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_tx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_msix_other(msix_irq, netdev);
+ enable_irq(msix_irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_netpoll
+ * @netdev: network interface device structure
+ *
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ e1000_intr_msix(adapter->pdev->irq, netdev);
+ break;
+ case E1000E_INT_MODE_MSI:
+ disable_irq(adapter->pdev->irq);
+ e1000_intr_msi(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ default: /* E1000E_INT_MODE_LEGACY */
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ }
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ e1000e_down(adapter, true);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the e1000e_pm_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
+ int err;
+ pci_ers_result_t result;
+
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pdev->state_saved = true;
+ pci_restore_state(pdev);
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ e1000e_reset(adapter);
+ ew32(WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the e1000e_pm_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000_init_manageability_pt(adapter);
+
+ if (netif_running(netdev)) {
+ if (e1000e_up(adapter)) {
+ dev_err(&pdev->dev,
+ "can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+}
+
+static void e1000_print_device_info(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 ret_val;
+ u8 pba_str[E1000_PBANUM_LENGTH];
+
+ /* print bus type/speed/width info */
+ e_info("(PCI Express:2.5GT/s:%s) %pM\n",
+ /* bus width */
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ "Width x1"),
+ /* MAC address */
+ netdev->dev_addr);
+ e_info("Intel(R) PRO/%s Network Connection\n",
+ (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
+ ret_val = e1000_read_pba_string_generic(hw, pba_str,
+ E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
+ e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, pba_str);
+}
+
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
+ u16 buf = 0;
+
+ if (hw->mac.type != e1000_82573)
+ return;
+
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+ le16_to_cpus(&buf);
+ if (!ret_val && (!(buf & (1 << 0)))) {
+ /* Deep Smart Power Down (DSPD) */
+ dev_warn(&adapter->pdev->dev,
+ "Warning: detected DSPD enabled in EEPROM\n");
+ }
+}
+
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
+ adapter->flags |= FLAG_TSO_FORCE;
+
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
+ NETIF_F_RXALL)))
+ return 0;
+
+ if (changed & NETIF_F_RXFCS) {
+ if (features & NETIF_F_RXFCS) {
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
+ } else {
+ /* We need to take it back to defaults, which might mean
+ * stripping is still disabled at the adapter level.
+ */
+ if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ else
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
+ }
+ }
+
+ netdev->features = features;
+
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+
+ return 0;
+}
+
+static const struct net_device_ops e1000e_netdev_ops = {
+ .ndo_open = e1000_open,
+ .ndo_stop = e1000_close,
+ .ndo_start_xmit = e1000_xmit_frame,
+ .ndo_get_stats64 = e1000e_get_stats64,
+ .ndo_set_rx_mode = e1000e_set_rx_mode,
+ .ndo_set_mac_address = e1000_set_mac,
+ .ndo_change_mtu = e1000_change_mtu,
+ .ndo_do_ioctl = e1000_ioctl,
+ .ndo_tx_timeout = e1000_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+
+ .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = e1000_netpoll,
+#endif
+ .ndo_set_features = e1000_set_features,
+};
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
+ const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
+ resource_size_t mmio_start, mmio_len;
+ resource_size_t flash_start, flash_len;
+ static int cards_found;
+ u16 aspm_disable_flag = 0;
+ int bars, i, err, pci_using_dac;
+ u16 eeprom_data = 0;
+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ s32 rval = 0;
+
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions_exclusive(pdev, bars,
+ e1000e_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+ /* PCI config space info */
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ netdev->irq = pdev->irq;
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ hw = &adapter->hw;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ei = ei;
+ adapter->pba = ei->pba;
+ adapter->flags = ei->flags;
+ adapter->flags2 = ei->flags2;
+ adapter->hw.adapter = adapter;
+ adapter->hw.mac.type = ei->mac;
+ adapter->max_hw_frame_size = ei->max_hw_frame_size;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+
+ err = -EIO;
+ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw.hw_addr)
+ goto err_ioremap;
+
+ if ((adapter->flags & FLAG_HAS_FLASH) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
+ (hw->mac.type < e1000_pch_spt)) {
+ flash_start = pci_resource_start(pdev, 1);
+ flash_len = pci_resource_len(pdev, 1);
+ adapter->hw.flash_address = ioremap(flash_start, flash_len);
+ if (!adapter->hw.flash_address)
+ goto err_flashmap;
+ }
+
+ /* Set default EEE advertisement */
+ if (adapter->flags2 & FLAG2_HAS_EEE)
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
+ /* construct the net_device struct */
+ netdev->netdev_ops = &e1000e_netdev_ops;
+ e1000e_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
+ strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+
+ adapter->bd_number = cards_found++;
+
+ e1000e_check_options(adapter);
+
+ /* setup adapter struct */
+ err = e1000_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+ err = ei->get_variants(adapter);
+ if (err)
+ goto err_hw_init;
+
+ if ((adapter->flags & FLAG_IS_ICH) &&
+ (adapter->flags & FLAG_READ_ONLY_NVM) &&
+ (hw->mac.type < e1000_pch_spt))
+ e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
+ hw->mac.ops.get_bus_info(&adapter->hw);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+ /* Copper options */
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+ adapter->hw.phy.disable_polarity_correction = 0;
+ adapter->hw.phy.ms_type = e1000_ms_hw_default;
+ }
+
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ dev_info(&pdev->dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
+
+ /* Set initial default active device features */
+ netdev->features = (NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM);
+
+ /* Set user-changeable features (subset of all device features) */
+ netdev->hw_features = netdev->features;
+ netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->hw_features |= NETIF_F_RXALL;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->vlan_features |= (NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (e1000e_enable_mng_pass_thru(&adapter->hw))
+ adapter->flags |= FLAG_MNG_PT_ENABLED;
+
+ /* before reading the NVM, reset the controller to
+ * put the device in a known good starting state
+ */
+ adapter->hw.mac.ops.reset_hw(&adapter->hw);
+
+ /* systems with ASPM and others may see the checksum fail on the first
+ * attempt. Let's give it a few tries
+ */
+ for (i = 0;; i++) {
+ if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
+ break;
+ if (i == 2) {
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+
+ e1000_eeprom_checks(adapter);
+
+ /* copy the MAC address */
+ if (e1000e_read_mac_addr(&adapter->hw))
+ dev_err(&pdev->dev,
+ "NVM Read Error while reading MAC address\n");
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = e1000_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ init_timer(&adapter->phy_info_timer);
+ adapter->phy_info_timer.function = e1000_update_phy_info;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+ INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+ INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+ INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+
+ /* Initialize link parameters. User can change them with ethtool */
+ adapter->hw.mac.autoneg = 1;
+ adapter->fc_autoneg = true;
+ adapter->hw.fc.requested_mode = e1000_fc_default;
+ adapter->hw.fc.current_mode = e1000_fc_default;
+ adapter->hw.phy.autoneg_advertised = 0x2f;
+
+ /* Initial Wake on LAN setting - If APM wake is enabled in
+ * the EEPROM, enable the ACPI Magic Packet filter
+ */
+ if (adapter->flags & FLAG_APME_IN_WUC) {
+ /* APME bit in EEPROM is mapped to WUC.APME */
+ eeprom_data = er32(WUC);
+ eeprom_apme_mask = E1000_WUC_APME;
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
+ adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
+ } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
+ if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+ (adapter->hw.bus.func == 1))
+ rval = e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_B,
+ 1, &eeprom_data);
+ else
+ rval = e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_A,
+ 1, &eeprom_data);
+ }
+
+ /* fetch WoL from EEPROM */
+ if (rval)
+ e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+ else if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port
+ */
+ if (!(adapter->flags & FLAG_HAS_WOL))
+ adapter->eeprom_wol = 0;
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
+ device_wakeup_enable(&pdev->dev);
+
+ /* save off EEPROM version number */
+ rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+ if (rval) {
+ e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+ adapter->eeprom_vers = 0;
+ }
+
+ /* reset the hardware with the new settings */
+ e1000e_reset(adapter);
+
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+ strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
+ e1000_print_device_info(adapter);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+
+err_register:
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_release_hw_control(adapter);
+err_eeprom:
+ if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
+ e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_sw_init:
+ if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
+ iounmap(adapter->hw.flash_address);
+ e1000e_reset_interrupt_capability(adapter);
+err_flashmap:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ e1000e_ptp_remove(adapter);
+
+ /* The timers may be rescheduled, so explicitly disable them
+ * from being rescheduled.
+ */
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->downshift_task);
+ cancel_work_sync(&adapter->update_phy_task);
+ cancel_work_sync(&adapter->print_hang_task);
+
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ cancel_work_sync(&adapter->tx_hwtstamp_work);
+ if (adapter->tx_hwtstamp_skb) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ }
+ }
+
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
+ unregister_netdev(netdev);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ e1000e_release_hw_control(adapter);
+
+ e1000e_reset_interrupt_capability(adapter);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.hw_addr);
+ if ((adapter->hw.flash_address) &&
+ (adapter->hw.mac.type < e1000_pch_spt))
+ iounmap(adapter->hw.flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ free_netdev(netdev);
+
+ /* AER disable */
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static const struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
+
+static const struct pci_device_id e1000_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
+ board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
+ board_80003es2lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
+
+ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
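+
+/* For reference: PCI_VDEVICE() fills in only the vendor/device pair
+ * and wildcards the subsystem IDs, so an entry such as the first one
+ * above is roughly equivalent to the open-coded initializer below
+ * (a sketch for illustration, not code that belongs in this table):
+ *
+ *   { .vendor = PCI_VENDOR_ID_INTEL,
+ *     .device = E1000_DEV_ID_82571EB_COPPER,
+ *     .subvendor = PCI_ANY_ID,
+ *     .subdevice = PCI_ANY_ID,
+ *     .driver_data = board_82571 },
+ *
+ * The driver_data value carries the board_* enum so e1000_probe() can
+ * look up the e1000_info structure matching the detected device.
+ */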
+
+static const struct dev_pm_ops e1000_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = e1000e_pm_suspend,
+ .resume = e1000e_pm_resume,
+ .freeze = e1000e_pm_freeze,
+ .thaw = e1000e_pm_thaw,
+ .poweroff = e1000e_pm_suspend,
+ .restore = e1000e_pm_resume,
+#endif
+ SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
+};
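+
+/* When CONFIG_PM is set, SET_RUNTIME_PM_OPS() above expands to roughly:
+ *
+ *   .runtime_suspend = e1000e_pm_runtime_suspend,
+ *   .runtime_resume = e1000e_pm_runtime_resume,
+ *   .runtime_idle = e1000e_pm_runtime_idle,
+ *
+ * so the system sleep callbacks are compiled in only for
+ * CONFIG_PM_SLEEP while the runtime callbacks depend on CONFIG_PM.
+ */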
+
+/* PCI Device API Driver */
+static struct pci_driver e1000_driver = {
+ .name = e1000e_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = e1000_remove,
+ .driver = {
+ .pm = &e1000_pm_ops,
+ },
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
+};
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+ int ret;
+
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+ ret = pci_register_driver(&e1000_driver);
+
+ return ret;
+}
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+ pci_unregister_driver(&e1000_driver);
+}
+module_exit(e1000_exit_module);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* netdev.c */
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/nvm.c b/kernel/drivers/net/ethernet/intel/e1000e/nvm.c
new file mode 100644
index 000000000..fa6b1036a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -0,0 +1,633 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EECD register value
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ e1e_flush();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ e1e_flush();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u32 mask;
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ e1e_flush();
+
+ udelay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the
+ * data-out "DO" bit. During this "shifting in" process the data-in "DI"
+ * bit should always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ eecd = er32(EECD);
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ e1000_raise_eec_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
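+
+/* Illustrative only: the two helpers above bit-bang an MSB-first
+ * serial interface. Stripped of the EECD register plumbing, a round
+ * trip of the same bit arithmetic looks like the sketch below
+ * (demo_roundtrip is a hypothetical name, not driver API):
+ *
+ *   static u16 demo_roundtrip(u16 data, u16 count)
+ *   {
+ *       u16 mask = 0x01 << (count - 1);
+ *       u16 in = 0;
+ *
+ *       do {
+ *           in <<= 1;        // "read" the bit being "driven"
+ *           if (data & mask)
+ *               in |= 1;
+ *           mask >>= 1;
+ *       } while (mask);
+ *
+ *       return in;           // equals data for any count <= 16
+ *   }
+ */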
+
+/**
+ * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = er32(EERD);
+ else
+ reg = er32(EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return 0;
+
+ udelay(5);
+ }
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000e_acquire_nvm - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000e_acquire_nvm(struct e1000_hw *hw)
+{
+ u32 eecd = er32(EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+ ew32(EECD, eecd | E1000_EECD_REQ);
+ eecd = er32(EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ udelay(5);
+ eecd = er32(EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ e_dbg("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ eecd = er32(EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * e1000e_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000e_release_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e1000_stop_nvm(hw);
+
+ eecd = er32(EECD);
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u8 spi_stat_reg;
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(1);
+
+ /* Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads one or more 16-bit words from the EEPROM using the EERD register.
+ **/
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ ew32(EERD, eerd);
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val) {
+ e_dbg("NVM read error: %d\n", ret_val);
+ break;
+ }
+
+ data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -E1000_ERR_NVM;
+ u16 widx = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ e1000_standby_nvm(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+ usleep_range(10000, 20000);
+ nvm->ops.release(hw);
+ }
+
+ return ret_val;
+}
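+
+/* The byte swap in the inner loop above is needed because bits shift
+ * out MSB-first while the 16-bit NVM words are stored little-endian in
+ * the byte-addressed SPI EEPROM. A worked example of the swap:
+ *
+ *   u16 word_out = 0x1234;
+ *   word_out = (word_out >> 8) | (word_out << 8);   // now 0x3412
+ *
+ * so 0x34, the original low byte, reaches the wire first.
+ */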
+
+/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ e_dbg("PBA string buffer was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if nvm_data is not the pointer guard, the PBA must be in legacy
+ * format, which means pba_ptr is actually our second data word for
+ * the PBA number and we can decode it into an ASCII string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ e_dbg("NVM PBA number is not stored as string\n");
+
+ /* make sure the caller's buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
+ e_dbg("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ e_dbg("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ e_dbg("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return 0;
+}
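+
+/* The legacy-format branch above stores raw nibbles first and converts
+ * them to ASCII afterwards. The conversion loop is equivalent to this
+ * helper (hex_char is a hypothetical name, not driver API):
+ *
+ *   static u8 hex_char(u8 c)
+ *   {
+ *       if (c < 0xA)
+ *           return c + '0';          // 0x0..0x9 -> '0'..'9'
+ *       if (c < 0x10)
+ *           return c + 'A' - 0xA;    // 0xA..0xF -> 'A'..'F'
+ *       return c;                    // e.g. '-' passes through
+ *   }
+ */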
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the receive address registers,
+ * which the hardware preloads from the EEPROM at reset, and stores the
+ * value. Since devices with two ports use the same EEPROM, we increment
+ * the last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
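+
+/* Byte-order sketch for the RAL/RAH unpacking above: the six MAC
+ * octets sit in the low bytes of the two registers, first octet in the
+ * least significant byte of RAL. For the made-up address
+ * 00:1b:21:aa:bb:cc:
+ *
+ *   RAL(0)          = 0xaa211b00   // perm_addr[0..3] = 00 1b 21 aa
+ *   RAH(0) & 0xffff = 0xccbb       // perm_addr[4..5] = bb cc
+ *
+ * Only E1000_RAH_MAC_ADDR_LEN (2) bytes come from RAH because its
+ * upper bits hold control flags such as Address Valid.
+ */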
+
+/**
+ * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16)NVM_SUM) {
+ e_dbg("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ e_dbg("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
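+
+/* Invariant shared by the two checksum routines above: the word at
+ * NVM_CHECKSUM_REG (0x3F in this driver's defines) is chosen so the
+ * 16-bit sum of words 0x00..0x3F equals NVM_SUM (0xBABA). In sketch
+ * form:
+ *
+ *   u16 sum = 0;
+ *   for (i = 0; i < NVM_CHECKSUM_REG; i++)
+ *       sum += word[i];
+ *   word[NVM_CHECKSUM_REG] = (u16)NVM_SUM - sum;   // update
+ *   // validate: adding the checksum word back yields 0xBABA mod 2^16
+ */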
+
+/**
+ * e1000e_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000e_reload_nvm_generic(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+
+ usleep_range(10, 20);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+}
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/nvm.h b/kernel/drivers/net/ethernet/intel/e1000e/nvm.h
new file mode 100644
index 000000000..342bf69ef
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -0,0 +1,40 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/param.c b/kernel/drivers/net/ethernet/intel/e1000e/param.c
new file mode 100644
index 000000000..aa1923f7e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/param.c
@@ -0,0 +1,533 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+#define COPYBREAK_DEFAULT 256
+unsigned int copybreak = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
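+
+/* For example, E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay")
+ * below expands (roughly) to:
+ *
+ *   static int TxIntDelay[E1000_MAX_NIC + 1] =
+ *       { [0 ... E1000_MAX_NIC] = OPTION_UNSET };
+ *   static unsigned int num_TxIntDelay;
+ *   module_param_array_named(TxIntDelay, TxIntDelay, int,
+ *                            &num_TxIntDelay, 0);
+ *   MODULE_PARM_DESC(TxIntDelay, "Transmit Interrupt Delay");
+ *
+ * giving one value per board, with num_TxIntDelay recording how many
+ * values the user actually supplied.
+ */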
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay needs to typically be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: varies depending on kernel configuration & hardware support
+ *
+ * legacy=0, MSI=1, MSI-X=2
+ *
+ * When MSI/MSI-X support is enabled in kernel-
+ * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise
+ * When MSI/MSI-X support is not enabled in kernel-
+ * Default Value: 0 (legacy)
+ *
+ * When a mode is specified that is not allowed/supported, it will be
+ * demoted to the most advanced interrupt mode available.
+ */
+E1000_PARAM(IntMode, "Interrupt Mode");
+#define MAX_INTMODE 2
+#define MIN_INTMODE 0
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+/* Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM,
+ "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
+/* Enable CRC Stripping
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(CrcStripping,
+ "Enable CRC Stripping, disable if your BMC needs the CRC");
+
+struct e1000_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ /* range_option info */
+ struct {
+ int min;
+ int max;
+ } r;
+ /* list_option info */
+ struct {
+ int nr;
+ struct e1000_opt_list {
+ int i;
+ char *str;
+ } *p;
+ } l;
+ } arg;
+};
+
+static int e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ dev_info(&adapter->pdev->dev, "%s Enabled\n",
+ opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ dev_info(&adapter->pdev->dev, "%s Disabled\n",
+ opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ dev_info(&adapter->pdev->dev, "%s set to %i\n",
+ opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct e1000_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ dev_info(&adapter->pdev->dev, "%s\n",
+ ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/**
+ * e1000e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void e1000e_check_options(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int bd = adapter->bd_number;
+
+ if (bd >= E1000_MAX_NIC) {
+ dev_notice(&adapter->pdev->dev,
+ "Warning: no configuration for board #%i\n", bd);
+ dev_notice(&adapter->pdev->dev,
+ "Using defaults for all values\n");
+ }
+
+ /* Transmit Interrupt Delay */
+ {
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TXDELAY,
+ .max = MAX_TXDELAY } }
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+ /* Transmit Absolute Interrupt Delay */
+ {
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Absolute Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_TADV),
+ .def = DEFAULT_TADV,
+ .arg = { .r = { .min = MIN_TXABSDELAY,
+ .max = MAX_TXABSDELAY } }
+ };
+
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
+ }
+ /* Receive Interrupt Delay */
+ {
+ static struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RXDELAY,
+ .max = MAX_RXDELAY } }
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ /* Receive Absolute Interrupt Delay */
+ {
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Absolute Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_RADV),
+ .def = DEFAULT_RADV,
+ .arg = { .r = { .min = MIN_RXABSDELAY,
+ .max = MAX_RXABSDELAY } }
+ };
+
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
+ }
+ /* Interrupt Throttling Rate */
+ {
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR } }
+ };
+
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+
+ /* Make sure a message is printed for non-special
+ * values. And in case of an invalid option, display
+ * warning, use default and go through itr/itr_setting
+ * adjustment logic below
+ */
+ if ((adapter->itr > 4) &&
+ e1000_validate_option(&adapter->itr, &opt, adapter))
+ adapter->itr = opt.def;
+ } else {
+ /* If no option specified, use default value and go
+ * through the logic below to adjust itr/itr_setting
+ */
+ adapter->itr = opt.def;
+
+ /* Make sure a message is printed for non-special
+ * default values
+ */
+ if (adapter->itr > 4)
+ dev_info(&adapter->pdev->dev,
+ "%s set to default %d\n", opt.name,
+ adapter->itr);
+ }
+
+ adapter->itr_setting = adapter->itr;
+ switch (adapter->itr) {
+ case 0:
+ dev_info(&adapter->pdev->dev, "%s turned off\n",
+ opt.name);
+ break;
+ case 1:
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic mode\n", opt.name);
+ adapter->itr = 20000;
+ break;
+ case 2:
+ dev_info(&adapter->pdev->dev,
+ "%s Invalid mode - setting default\n",
+ opt.name);
+ adapter->itr_setting = opt.def;
+ /* fall-through */
+ case 3:
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->itr = 20000;
+ break;
+ case 4:
+ dev_info(&adapter->pdev->dev,
+ "%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
+ break;
+ default:
+ /* Save the setting, because the dynamic bits
+ * change itr.
+ *
+ * Clear the lower two bits because
+ * they are used as control.
+ */
+ adapter->itr_setting &= ~3;
+ break;
+ }
+ }
+ /* Interrupt Mode */
+ {
+ static struct e1000_option opt = {
+ .type = range_option,
+ .name = "Interrupt Mode",
+#ifndef CONFIG_PCI_MSI
+ .err = "defaulting to 0 (legacy)",
+ .def = E1000E_INT_MODE_LEGACY,
+ .arg = { .r = { .min = 0,
+ .max = 0 } }
+#endif
+ };
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->flags & FLAG_HAS_MSIX) {
+ opt.err = kstrdup("defaulting to 2 (MSI-X)",
+ GFP_KERNEL);
+ opt.def = E1000E_INT_MODE_MSIX;
+ opt.arg.r.max = E1000E_INT_MODE_MSIX;
+ } else {
+ opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL);
+ opt.def = E1000E_INT_MODE_MSI;
+ opt.arg.r.max = E1000E_INT_MODE_MSI;
+ }
+
+ if (!opt.err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate memory\n");
+ return;
+ }
+#endif
+
+ if (num_IntMode > bd) {
+ unsigned int int_mode = IntMode[bd];
+
+ e1000_validate_option(&int_mode, &opt, adapter);
+ adapter->int_mode = int_mode;
+ } else {
+ adapter->int_mode = opt.def;
+ }
+
+#ifdef CONFIG_PCI_MSI
+ kfree(opt.err);
+#endif
+ }
+ /* Smart Power Down */
+ {
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ if (num_SmartPowerDownEnable > bd) {
+ unsigned int spd = SmartPowerDownEnable[bd];
+
+ e1000_validate_option(&spd, &opt, adapter);
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
+ adapter->flags |= FLAG_SMART_POWER_DOWN;
+ }
+ }
+ /* CRC Stripping */
+ {
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "CRC Stripping",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_CrcStripping > bd) {
+ unsigned int crc_stripping = CrcStripping[bd];
+
+ e1000_validate_option(&crc_stripping, &opt, adapter);
+ if (crc_stripping == OPTION_ENABLED) {
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
+ }
+ } else {
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
+ }
+ }
+ /* Kumeran Lock Loss Workaround */
+ {
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Kumeran Lock Loss Workaround",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+ bool enabled = opt.def;
+
+ if (num_KumeranLockLoss > bd) {
+ unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
+ e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+ enabled = kmrn_lock_loss;
+ }
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+ enabled);
+ }
+ /* Write-protect NVM */
+ {
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Write-protect NVM",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ if (num_WriteProtectNVM > bd) {
+ unsigned int write_protect_nvm =
+ WriteProtectNVM[bd];
+ e1000_validate_option(&write_protect_nvm, &opt,
+ adapter);
+ if (write_protect_nvm)
+ adapter->flags |= FLAG_READ_ONLY_NVM;
+ } else {
+ if (opt.def)
+ adapter->flags |= FLAG_READ_ONLY_NVM;
+ }
+ }
+ }
+}
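+
+/* Example invocation (hypothetical values): loading the module with
+ *
+ *   modprobe e1000e InterruptThrottleRate=1,3 IntMode=2,2
+ *
+ * selects dynamic ITR on board 0 and dynamic conservative on board 1,
+ * with MSI-X on both; boards beyond the supplied values fall back to
+ * each option's .def above.
+ */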
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/phy.c b/kernel/drivers/net/ethernet/intel/e1000e/phy.c
new file mode 100644
index 000000000..b2005e13f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/phy.c
@@ -0,0 +1,3237 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "e1000.h"
+
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set);
+static u32 e1000_get_phy_addr_for_hv_page(u32 page);
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
+};
+
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_m88_cable_length_table)
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124
+};
+
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_igp_2_cable_length_table)
+
+/**
+ * e1000e_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ manc = er32(MANC);
+
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * e1000e_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 e1000e_get_phy_id(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_id;
+ u16 retry_count = 0;
+
+ if (!phy->ops.read_reg)
+ return 0;
+
+ while (retry_count < 2) {
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usleep_range(20, 40);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+ if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+ return 0;
+
+ retry_count++;
+ }
+
+ return 0;
+}
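+
+/* Layout sketch for the ID assembled above: MII_PHYSID1 supplies the
+ * upper 16 bits, and MII_PHYSID2 contributes its upper 12 bits to the
+ * ID while its low 4 bits are kept separately as the revision:
+ *
+ *   phy->id       = (physid1 << 16) | (physid2 & 0xFFF0);
+ *   phy->revision = physid2 & 0x000F;
+ *
+ * (masks shown follow from PHY_REVISION_MASK and are illustrative).
+ * The retry loop guards against reads of all zeros or all ones while
+ * the PHY is still coming out of reset.
+ */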
+
+/**
+ * e1000e_phy_reset_dsp - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+ return ret_val;
+
+ return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ * e1000e_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed.
+ * The timeout is increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16)mdic;
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ return 0;
+}
+
+/**
+ * e1000e_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed.
+ * The timeout is increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ return 0;
+}
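+
+/* MDIC command-word layout used by the two accessors above, per this
+ * driver's E1000_MDIC_* defines:
+ *
+ *   bits 15:0   data         (write data, or read result)
+ *   bits 20:16  register     (offset << E1000_MDIC_REG_SHIFT)
+ *   bits 25:21  PHY address  (addr << E1000_MDIC_PHY_SHIFT)
+ *   bits 27:26  opcode       (E1000_MDIC_OP_READ / _OP_WRITE)
+ *   bit  28     READY        (set by hardware on completion)
+ *   bit  30     ERROR        (set by hardware on failure)
+ *
+ * e.g. a read of register 2 on PHY address 1 writes
+ * (2 << 16) | (1 << 21) | E1000_MDIC_OP_READ and then polls READY.
+ */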
+
+/**
+ * e1000e_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ e_dbg("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ s32 ret_val = 0;
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return 0;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ s32 ret_val = 0;
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return 0;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
+ offset, data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ if (!locked) {
+ s32 ret_val = 0;
+
+ if (!hw->phy.ops.acquire)
+ return 0;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ kmrnctrlsta = er32(KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return 0;
+}
+
+/**
+ * e1000e_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then writes the data to PHY register
+ * at the offset using the kumeran interface. Release any acquired semaphores
+ * before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ if (!locked) {
+ s32 ret_val = 0;
+
+ if (!hw->phy.ops.acquire)
+ return 0;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return 0;
+}
+
+/**
+ * e1000e_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * e1000_set_master_slave_mode - Setup PHY for Master/Slave mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Master/Slave mode
+ **/
+static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Resolve Master/Slave mode */
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ?
+ ((phy_data & CTL1000_AS_MASTER) ?
+ e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
+
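+	/* CTL1000_ENABLE_MASTER selects manual master/slave resolution;
+	 * CTL1000_AS_MASTER then forces master (set) or slave (clear).
+	 */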
+ switch (hw->phy.ms_type) {
+ case e1000_ms_force_master:
+ phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CTL1000_ENABLE_MASTER;
+ phy_data &= ~(CTL1000_AS_MASTER);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CTL1000_ENABLE_MASTER;
+ /* fall-through */
+ default:
+ break;
+ }
+
+ return e1e_wphy(hw, MII_CTRL1000, phy_data);
+}
+
+/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set MDI/MDIX mode */
+ ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /* Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_set_master_slave_mode(hw);
+}
+
+/**
+ * e1000e_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, transmit clock
+ * and downshift values are also set.
+ **/
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* For BM PHY this bit is downshift enable */
+ if (phy->type != e1000_phy_bm)
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift on BM (disabled by default) */
+ if (phy->type == e1000_phy_bm) {
+ /* For 82574/82583, first disable then enable downshift */
+ if (phy->id == BME1000_E_PHY_ID_R2) {
+ phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ e_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+ }
+
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy->type == e1000_phy_m88) &&
+ (phy->revision < E1000_REVISION_4) &&
+ (phy->id != BME1000_E_PHY_ID_R2)) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
+ /* 82573L PHY - set the downshift counter to 5x. */
+ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+ /* Set PHY page 0, register 29 to 0x0003 */
+ ret_val = e1e_wphy(hw, 29, 0x0003);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY page 0, register 30 to 0x0000 */
+ ret_val = e1e_wphy(hw, 30, 0x0000);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Commit the changes. */
+ if (phy->ops.commit) {
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ e_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ if (phy->type == e1000_phy_82578) {
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* 82578 PHY - set the downshift count to 1x. */
+ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+ phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_copper_link_setup_igp - Setup igp PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHYs.
+ **/
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val) {
+ e_dbg("Error resetting the PHY.\n");
+ return ret_val;
+ }
+
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+ msleep(100);
+
+ /* disable lplu d0 during driver init */
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (phy->mdix) {
+ case 1:
+ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+ /* when autonegotiation advertisement is only 1000Mbps then we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default.
+ */
+ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~CTL1000_ENABLE_MASTER;
+ ret_val = e1e_wphy(hw, MII_CTRL1000, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register, then sets up the advertisement and flow control bits to
+ * the appropriate values for the desired auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF |
+ ADVERTISE_10FULL | ADVERTISE_10HALF);
+ mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+
+ e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ e_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= ADVERTISE_10HALF;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ e_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= ADVERTISE_10FULL;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ e_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= ADVERTISE_100HALF;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ e_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ e_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ e_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (MII_ADVERTISE) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &=
+ ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in e1000e_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
+ mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the PHY to advertise the full capability. Sets up the PHY
+ * to autonegotiate and restarts the negotiation process with the link
+ * partner. If autoneg_wait_to_complete is set, waits for autoneg to complete
+ * before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (!phy->autoneg_advertised)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ e_dbg("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
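+	/* Force the next link check to query the PHY for fresh status */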
+ hw->mac.get_link_status = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000e_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex, then checks for link. Once link is established, the
+ * collision distance and flow control are configured. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000e_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ e_dbg("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ e_dbg("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (link) {
+ e_dbg("Valid link established!!!\n");
+ hw->mac.ops.config_collision_dist(hw);
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ } else {
+ e_dbg("Unable to establish link!!!\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * 0 if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("IGP PSCR: %X\n", phy_data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on Tx must be set. Returns 0 upon
+ * successful completion, else the corresponding error code.
+ **/
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("M88E1000 PSCR: %X\n", phy_data);
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the phy to commit changes. */
+ if (hw->phy.ops.commit) {
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ if (hw->phy.type != e1000_phy_m88) {
+ e_dbg("Link taking longer than expected.\n");
+ } else {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_phy_reset_dsp(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type != e1000_phy_m88)
+ return 0;
+
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+ * the reset value of 2.5MHz.
+ */
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = e1e_wphy(hw, MII_BMCR, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("IFE PMC: %X\n", data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of MII_BMCR
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the MII_BMCR register for these settings to
+ * take affect.
+ **/
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~BMCR_ANENABLE;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~BMCR_FULLDPLX;
+ e_dbg("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= BMCR_FULLDPLX;
+ e_dbg("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= BMCR_SPEED100;
+ *phy_ctrl &= ~BMCR_SPEED1000;
+ e_dbg("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100);
+ e_dbg("Forcing 10mb\n");
+ }
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true; otherwise lplu for D3 is
+ * cleared and SmartSpeed is enabled. LPLU and SmartSpeed are mutually
+ * exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_check_downshift - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000e_check_downshift(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ switch (phy->type) {
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ return 0;
+ }
+
+ ret_val = e1e_rphy(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = !!(phy_data & mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data, offset, mask;
+
+ /* Polarity is determined based on the speed of
+ * our connection.
+ */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
+ } else {
+ /* This really only applies to 10Mbps since
+ * there is no polarity for 100Mbps (always 0).
+ */
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+
+ ret_val = e1e_rphy(hw, offset, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on the polarity reversal feature being enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ /* Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = e1e_rphy(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 i, phy_status;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
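+		/* Read MII_BMSR twice; some status bits are latched, so the
+		 * second read returns the current state (see also the double
+		 * read in e1000e_phy_has_link_generic()).
+		 */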
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & BMSR_ANEGCOMPLETE)
+ break;
+ msleep(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = 0;
+ u16 i, phy_status;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the MII_BMSR register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+ if (ret_val) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & BMSR_LSTATUS)
+ break;
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
+
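+	/* Link was found only if the loop exited before all iterations were used */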
+ *success = (i < iterations);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has five
+ * possible cable length values, which are:
+ * Register Value Cable Length
+ * 0 < 50 meters
+ * 1 50 - 80 meters
+ * 2 80 - 110 meters
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
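+	/* The cable length field indexes the table; table[index] is the
+	 * minimum length and table[index + 1] the maximum for that reading.
+	 */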
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return 0;
+}
+
+/**
+ * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return 0;
+}
+
+/**
+ * e1000e_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid for only copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ if (phy->media_type != e1000_media_type_copper) {
+ e_dbg("Phy info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->polarity_correction = !!(phy_data &
+ M88E1000_PSCR_POLARITY_REVERSAL);
+
+ ret_val = e1000_check_polarity_m88(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (phy_data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (phy_data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ } else {
+ /* Set values to "undefined" */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_igp(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+ }
+
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+ return 0;
+}
+
+/**
+ * e1000e_phy_sw_reset - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit, and writing the register back to the PHY.
+ **/
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= BMCR_RESET;
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return 0;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
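+	/* Pulse PHY_RST in the device control register: assert it for the
+	 * PHY-specific reset delay, then deassert and let the PHY recover.
+	 */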
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ e1e_flush();
+
+ udelay(phy->reset_delay_us);
+
+ ew32(CTRL, ctrl);
+ e1e_flush();
+
+ usleep_range(150, 300);
+
+ phy->ops.release(hw);
+
+ return phy->ops.get_cfg_done(hw);
+}
+
+/**
+ * e1000e_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milliseconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw)
+{
+ mdelay(10);
+
+ return 0;
+}
+
+/**
+ * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
+{
+ e_dbg("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+ e1e_wphy(hw, 0x2F5B, 0x9018);
+ /* Remove all caps from Replica path filter */
+ e1e_wphy(hw, 0x2F52, 0x0000);
+ /* Bias trimming for ADC, AFE and Driver (Default) */
+ e1e_wphy(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ e1e_wphy(hw, 0x2FB2, 0xF8F0);
+ /* Add 4% to Tx amplitude in Gig mode */
+ e1e_wphy(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ e1e_wphy(hw, 0x2011, 0x0000);
+ /* Poly DC correction to 94.6% + 2% for all channels */
+ e1e_wphy(hw, 0x20DD, 0x249A);
+ /* ABS DC correction to 95.9% */
+ e1e_wphy(hw, 0x20DE, 0x00D3);
+ /* BG temp curve trim */
+ e1e_wphy(hw, 0x28B4, 0x04CE);
+ /* Increasing ADC OPAMP stage 1 currents to max */
+ e1e_wphy(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+ e1e_wphy(hw, 0x0000, 0x0140);
+ /* Set upd_freq to 6 */
+ e1e_wphy(hw, 0x1F30, 0x1606);
+ /* Disable NPDFE */
+ e1e_wphy(hw, 0x1F31, 0xB814);
+ /* Disable adaptive fixed FFE (Default) */
+ e1e_wphy(hw, 0x1F35, 0x002A);
+ /* Enable FFE hysteresis */
+ e1e_wphy(hw, 0x1F3E, 0x0067);
+ /* Fixed FFE for short cable lengths */
+ e1e_wphy(hw, 0x1F54, 0x0065);
+ /* Fixed FFE for medium cable lengths */
+ e1e_wphy(hw, 0x1F55, 0x002A);
+ /* Fixed FFE for long cable lengths */
+ e1e_wphy(hw, 0x1F56, 0x002A);
+ /* Enable Adaptive Clip Threshold */
+ e1e_wphy(hw, 0x1F72, 0x3FB0);
+ /* AHT reset limit to 1 */
+ e1e_wphy(hw, 0x1F76, 0xC0FF);
+ /* Set AHT master delay to 127 msec */
+ e1e_wphy(hw, 0x1F77, 0x1DEC);
+ /* Set scan bits for AHT */
+ e1e_wphy(hw, 0x1F78, 0xF9EF);
+ /* Set AHT Preset bits */
+ e1e_wphy(hw, 0x1F79, 0x0210);
+ /* Change integ_factor of channel A to 3 */
+ e1e_wphy(hw, 0x1895, 0x0003);
+ /* Change prop_factor of channels BCD to 8 */
+ e1e_wphy(hw, 0x1796, 0x0008);
+ /* Change cg_icount + enable integbp for channels BCD */
+ e1e_wphy(hw, 0x1798, 0xD008);
+ /* Change cg_icount + enable integbp + change prop_factor_master
+ * to 8 for channel A
+ */
+ e1e_wphy(hw, 0x1898, 0xD918);
+ /* Disable AHT in Slave mode on channel A */
+ e1e_wphy(hw, 0x187A, 0x0800);
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
+ * Enable SPD+B2B
+ */
+ e1e_wphy(hw, 0x0019, 0x008D);
+ /* Enable restart AN on an1000_dis change */
+ e1e_wphy(hw, 0x001B, 0x2080);
+ /* Enable wh_fifo read clock in 10/100 modes */
+ e1e_wphy(hw, 0x0014, 0x0045);
+ /* Restart AN, Speed selection is 1000 */
+ e1e_wphy(hw, 0x0000, 0x1340);
+
+ return 0;
+}
+
+/**
+ * e1000e_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
+{
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ switch (phy_id) {
+ case M88E1000_I_PHY_ID:
+ case M88E1000_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+ phy_type = e1000_phy_igp_2;
+ break;
+ case GG82563_E_PHY_ID:
+ phy_type = e1000_phy_gg82563;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy_type = e1000_phy_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ case BME1000_E_PHY_ID_R2:
+ phy_type = e1000_phy_bm;
+ break;
+ case I82578_E_PHY_ID:
+ phy_type = e1000_phy_82578;
+ break;
+ case I82577_E_PHY_ID:
+ phy_type = e1000_phy_82577;
+ break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
+ case I217_E_PHY_ID:
+ phy_type = e1000_phy_i217;
+ break;
+ default:
+ phy_type = e1000_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * e1000e_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000e_determine_phy_address(struct e1000_hw *hw)
+{
+ u32 phy_addr = 0;
+ u32 i;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ hw->phy.id = phy_type;
+
+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ i = 0;
+
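+		/* Retry the ID read up to 10 times at this address in case
+		 * the PHY is still coming out of reset.
+		 */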
+ do {
+ e1000e_get_phy_id(hw);
+ phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
+
+			/* If phy_type is valid, we found our PHY
+			 * address, so return success
+			 */
+ if (phy_type != e1000_phy_unknown)
+ return 0;
+
+ usleep_range(1000, 2000);
+ i++;
+ } while (i < 10);
+ }
+
+ return -E1000_ERR_PHY_TYPE;
+}
+
+/**
+ * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ * @page: page to access
+ *
+ * Returns the phy address for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+ u32 phy_addr = 2;
+
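+	/* Pages 768 and above, page 0 register 25, and register 31 are
+	 * accessed through PHY address 1; all other BM registers use
+	 * address 2.
+	 */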
+ if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000e_write_phy_reg_bm - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to the PHY register
+ * at the offset. Releases any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto release;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /* Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto release;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /* Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm2 - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto release;
+ }
+
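+	/* All non-wakeup BM2 registers are accessed through PHY address 1 */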
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_bm2 - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to the PHY register
+ * at the offset. Releases any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto release;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ * Assumes semaphore already acquired and phy_reg points to a valid memory
+ * address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+ u16 temp;
+
+ /* All page select, port ctrl and wakeup registers use phy address 1 */
+ hw->phy.addr = 1;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ e_dbg("Could not set Port Control page\n");
+ return ret_val;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ if (ret_val) {
+ e_dbg("Could not read PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ return ret_val;
+ }
+
+ /* Enable both PHY wakeup mode and Wakeup register page writes.
+ * Prevent a power state change by disabling ME and Host PHY wakeup.
+ */
+ temp = *phy_reg;
+ temp |= BM_WUC_ENABLE_BIT;
+ temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+ if (ret_val) {
+ e_dbg("Could not write PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ return ret_val;
+ }
+
+ /* Select Host Wakeup Registers page - caller now able to write
+ * registers on the Wakeup registers page
+ */
+ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+}
+
+/**
+ * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ * Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ * Assumes semaphore already acquired and *phy_reg is the contents of the
+ * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ * caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ e_dbg("Could not set Port Control page\n");
+ return ret_val;
+ }
+
+ /* Restore 769.17 to its original value */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+ if (ret_val)
+ e_dbg("Could not restore PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+
+ return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Read the PHY register at offset and store the retrieved information in
+ * data, or write data to PHY register at offset. Note the procedure to
+ * access the PHY wakeup registers is different than reading the other PHY
+ * registers. It works as such:
+ * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ * 2) Set page to 800 for host (801 if we were manageability)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769.17.2 to its original value
+ *
+ * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ * Assumes semaphore is already acquired. When page_set==true, assumes
+ * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ * is responsible for calls to
+ * e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set)
+{
+ s32 ret_val;
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 phy_reg = 0;
+
+ /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ e_dbg("Attempting to access page %d while gig enabled.\n",
+ page);
+
+ if (!page_set) {
+ /* Enable access to PHY wakeup registers */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val) {
+ e_dbg("Could not enable PHY wakeup reg access\n");
+ return ret_val;
+ }
+ }
+
+ e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+ /* Write the Wakeup register page offset value using opcode 0x11 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (ret_val) {
+ e_dbg("Could not write address opcode to page %d\n", page);
+ return ret_val;
+ }
+
+ if (read) {
+ /* Read the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ data);
+ } else {
+ /* Write the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ *data);
+ }
+
+ if (ret_val) {
+ e_dbg("Could not access PHY reg %d.%d\n", page, reg);
+ return ret_val;
+ }
+
+ if (!page_set)
+ ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+ return ret_val;
+}
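+
+/* Illustrative sketch (editor's addition, not part of the upstream driver):
+ * the five-step wakeup access procedure above, spelled out with the public
+ * helpers defined in this file. Assumes the PHY semaphore is already held,
+ * as the real callers guarantee; the function name is hypothetical.
+ */
+static s32 __maybe_unused e1000_example_read_wuc_reg(struct e1000_hw *hw,
+ u16 reg, u16 *data)
+{
+ u16 saved_wuc_enable;
+ s32 ret_val;
+
+ /* Steps 1-2: set 769.17.2 and select the Host Wakeup page */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &saved_wuc_enable);
+ if (ret_val)
+ return ret_val;
+
+ /* Step 3: write the register address using the address opcode (0x11) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (!ret_val)
+ /* Step 4: read the data using the data opcode (0x12) */
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, data);
+
+ /* Step 5: restore 769.17 to its original value */
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &saved_wuc_enable);
+
+ return ret_val;
+}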
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or because wake on lan is not enabled, restore the link to
+ * its previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg &= ~BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, or to turn off link during a driver
+ * unload when wake on lan is not enabled. The PHY retains its settings
+ * across the power down/up cycle.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg |= BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
+ usleep_range(1000, 2000);
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
+ **/
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ &data, false);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ /* Workaround MDIO accesses being disabled after entering IEEE
+ * Power Down (when bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+
+ ret_val = e1000_access_phy_debug_regs_hv(hw,
+ (1 << 6) | 0x3,
+ &data2, false);
+ if (ret_val)
+ goto out;
+ }
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_write_phy_reg_page_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read/write HV PHY vendor-specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data, or writes data to the register, as selected by @read. Assumes
+ * semaphore already acquired. Note that the procedure to access these
+ * regs uses the address port and data port to read/write. These accesses
+ * are done with PHY address 2 and without using pages.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg;
+ u32 data_reg;
+
+ /* This takes care of the difference between desktop and mobile PHYs */
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
+ data_reg = addr_reg + 1;
+
+ /* All operations in this function are done with PHY address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ e_dbg("Could not write the Address Offset port register\n");
+ return ret_val;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val)
+ e_dbg("Could not access the Data port register\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 data;
+
+ if (hw->phy.type != e1000_phy_82578)
+ return 0;
+
+ /* Do not apply workaround if PHY loopback (BMCR bit 14) is set */
+ e1e_rphy(hw, MII_BMCR, &data);
+ if (data & BMCR_LOOPBACK)
+ return 0;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ return 0;
+
+ msleep(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
+ if (ret_val)
+ return ret_val;
+
+ return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDI-X and speed. Based on the speed,
+ * determine the cable length, and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ return -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+ return 0;
+}
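+
+/* Worked example (editor's addition): with I82577_DSTATUS_CABLE_LENGTH =
+ * 0x03FC and I82577_DSTATUS_CABLE_LENGTH_SHIFT = 2 (see phy.h), a diagnostic
+ * status reading of 0x00A0 gives (0x00A0 & 0x03FC) >> 2 = 40, which is
+ * stored as the cable length; a field of all ones (0xFF) is rejected as
+ * E1000_CABLE_LENGTH_UNDEFINED.
+ */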
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/phy.h b/kernel/drivers/net/ethernet/intel/e1000e/phy.h
new file mode 100644
index 000000000..537d2780b
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/phy.h
@@ -0,0 +1,236 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_PHY_H_
+#define _E1000E_PHY_H_
+
+s32 e1000e_check_downshift(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000e_get_phy_id(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
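+
+/* Worked example (editor's addition): BM_PHY_REG() packs a page and register
+ * number into a single u32 offset and the two macros above recover them.
+ * With MAX_PHY_REG_ADDRESS = 0x1F and PHY_PAGE_SHIFT = 5 (defined elsewhere
+ * in this driver):
+ *
+ * BM_PHY_REG(BM_WUC_PAGE, 1) = 1 | (800 << 5) = 0x6401
+ * BM_PHY_REG_PAGE(0x6401) = (0x6401 >> 5) & 0xFFFF = 800
+ * BM_PHY_REG_NUM(0x6401) = 0x6401 & 0x1F = 1
+ */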
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
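+
+/* Example (editor's addition): e1000_link_stall_workaround_hv() in phy.c
+ * masks BM_CS_STATUS with (LINK_UP | RESOLVED | SPEED_MASK) and compares
+ * the result with 0x0400 | 0x0800 | 0x8000 = 0x8C00 to detect a resolved
+ * 1000 Mbps link.
+ */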
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_SPEED_100 0x0100
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+#endif
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ptp.c b/kernel/drivers/net/ethernet/intel/e1000e/ptp.c
new file mode 100644
index 000000000..8d7b21dc7
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -0,0 +1,273 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* PTP 1588 Hardware Clock (PHC)
+ * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ struct e1000_hw *hw = &adapter->hw;
+ bool neg_adj = false;
+ unsigned long flags;
+ u64 adjustment;
+ u32 timinca, incvalue;
+ s32 ret_val;
+
+ if ((delta > ptp->max_adj) || (delta <= -1000000000))
+ return -EINVAL;
+
+ if (delta < 0) {
+ neg_adj = true;
+ delta = -delta;
+ }
+
+ /* Get the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (ret_val)
+ return ret_val;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+
+ incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
+
+ adjustment = incvalue;
+ adjustment *= delta;
+ adjustment = div_u64(adjustment, 1000000000);
+
+ incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+
+ timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
+ timinca |= incvalue;
+
+ ew32(TIMINCA, timinca);
+
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
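+
+/* Worked example (editor's addition): if the base TIMINCA incvalue were 24
+ * and delta were +1000 ppb, the scaled adjustment would be 24 * 1000 / 10^9,
+ * which truncates to 0 under div_u64; larger base incvalues give
+ * proportionally finer frequency resolution. The range check above rejects
+ * delta <= -10^9, for which (incvalue - adjustment) would not be positive.
+ */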
+
+/**
+ * e1000e_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_adjtime(&adapter->tc, delta);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec64.
+ **/
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int e1000e_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+
+ /* reset the timecounter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc, ns);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void e1000e_systim_overflow_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ systim_overflow_work.work);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec64 ts;
+
+ adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+
+ e_dbg("SYSTIM overflow check at %lld.%09lu\n",
+ (long long) ts.tv_sec, ts.tv_nsec);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+}
+
+static const struct ptp_clock_info e1000e_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = e1000e_phc_adjfreq,
+ .adjtime = e1000e_phc_adjtime,
+ .gettime64 = e1000e_phc_gettime,
+ .settime64 = e1000e_phc_settime,
+ .enable = e1000e_phc_enable,
+};
+
+/**
+ * e1000e_ptp_init - initialize PTP for devices which support it
+ * @adapter: board private structure
+ *
+ * This function performs the required steps for enabling PTP support.
+ * If the device does not support hardware timestamping, it returns
+ * without registering a PHC.
+ **/
+void e1000e_ptp_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->ptp_clock = NULL;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ adapter->ptp_clock_info = e1000e_ptp_clock_info;
+
+ snprintf(adapter->ptp_clock_info.name,
+ sizeof(adapter->ptp_clock_info.name), "%pm",
+ adapter->netdev->perm_addr);
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ case e1000_pch_spt:
+ if (((hw->mac.type != e1000_pch_lpt) &&
+ (hw->mac.type != e1000_pch_spt)) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ adapter->ptp_clock_info.max_adj = 24000000 - 1;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ break;
+ default:
+ break;
+ }
+
+ INIT_DELAYED_WORK(&adapter->systim_overflow_work,
+ e1000e_systim_overflow_work);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ e_err("ptp_clock_register failed\n");
+ } else {
+ e_info("registered PHC clock\n");
+ }
+}
+
+/**
+ * e1000e_ptp_remove - disable PTP device and stop the overflow check
+ * @adapter: board private structure
+ *
+ * Stop the PTP support, and cancel the delayed work.
+ **/
+void e1000e_ptp_remove(struct e1000_adapter *adapter)
+{
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ cancel_delayed_work_sync(&adapter->systim_overflow_work);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ e_info("removed PHC\n");
+ }
+}
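+
+/* Usage note (editor's addition): once ptp_clock_register() succeeds, the
+ * clock is exposed to userspace as /dev/ptpN and can be exercised with the
+ * kernel's testptp example program (under Documentation/ptp/ or
+ * tools/testing/selftests/ptp/, depending on kernel version), e.g.
+ * "testptp -d /dev/ptp0 -g" to read the current time.
+ */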
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/regs.h b/kernel/drivers/net/ethernet/intel/e1000e/regs.h
new file mode 100644
index 000000000..85eefc483
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/e1000e/regs.h
@@ -0,0 +1,250 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_REGS_H_
+#define _E1000E_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High - RW */
+#define E1000_FEXT 0x0002C /* Future Extended - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC 0x00F28 /* TX corrupted data */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEPROM Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
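+/* Worked example (editor's addition): queues 0-3 use the legacy block spaced
+ * 0x100 apart; higher queue numbers use the alternate block spaced 0x40
+ * apart, e.g. E1000_RDBAL(1) = 0x02900 and E1000_RDBAL(4) = 0x0C100.
+ */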
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+#endif