author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
---|---|---
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch) |
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/net/ethernet/intel |
parent | f93b97fd65072de626c074dbe099a1fff05ce060 (diff) |
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The colliding hunks were dropped because their logic was already present
in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/intel')
122 files changed, 12407 insertions, 7196 deletions
diff --git a/kernel/drivers/net/ethernet/intel/Kconfig b/kernel/drivers/net/ethernet/intel/Kconfig index f4ff46558..4163b1648 100644 --- a/kernel/drivers/net/ethernet/intel/Kconfig +++ b/kernel/drivers/net/ethernet/intel/Kconfig @@ -6,9 +6,7 @@ config NET_VENDOR_INTEL bool "Intel devices" default y ---help--- - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. + If you have a network (Ethernet) card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/kernel/drivers/net/ethernet/intel/e100.c b/kernel/drivers/net/ethernet/intel/e100.c index 1a450f4b6..068789e69 100644 --- a/kernel/drivers/net/ethernet/intel/e100.c +++ b/kernel/drivers/net/ethernet/intel/e100.c @@ -874,7 +874,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, { struct cb *cb; unsigned long flags; - int err = 0; + int err; spin_lock_irqsave(&nic->cb_lock, flags); @@ -1770,8 +1770,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb, dma_addr = pci_map_single(nic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); /* If we can't map the skb, have the upper layer try later */ - if (pci_dma_mapping_error(nic->pdev, dma_addr)) + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; return -ENOMEM; + } /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for @@ -2922,9 +2925,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - init_timer(&nic->watchdog); - nic->watchdog.function = e100_watchdog; - nic->watchdog.data = (unsigned long)nic; + setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic); INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); @@ -2969,6 +2970,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->params.cbs.max * sizeof(struct cb), sizeof(u32), 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } netif_info(nic, probe, nic->netdev, "addr 0x%llx, irq %d, MAC addr %pM\n", (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), @@ -2976,6 +2982,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_out_pool: + unregister_netdev(netdev); err_out_free: e100_free(nic); err_out_iounmap: diff --git a/kernel/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/kernel/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 4270ad2d4..83e557c7f 100644 --- a/kernel/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -559,8 +559,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } static void e1000_get_ringparam(struct net_device *netdev, diff --git a/kernel/drivers/net/ethernet/intel/e1000/e1000_hw.c b/kernel/drivers/net/ethernet/intel/e1000/e1000_hw.c index 45c8c8641..b1af0d613 100644 --- a/kernel/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/kernel/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -3900,10 +3900,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, return E1000_SUCCESS; } - /* If eeprom is not yet detected, do so now */ - if (eeprom->word_size == 0) - e1000_init_eeprom_params(hw); - /* A check for invalid values: offset too large, too many words, and * not enough words. */ @@ -4074,10 +4070,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, return E1000_SUCCESS; } - /* If eeprom is not yet detected, do so now */ - if (eeprom->word_size == 0) - e1000_init_eeprom_params(hw); - /* A check for invalid values: offset too large, too many words, and * not enough words. */ diff --git a/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c b/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c index 983eb4e6f..fd7be860c 100644 --- a/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2079,11 +2079,6 @@ static void *e1000_alloc_frag(const struct e1000_adapter *a) return data; } -static void e1000_free_frag(const void *data) -{ - put_page(virt_to_head_page(data)); -} - /** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure @@ -2107,7 +2102,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, adapter->rx_buffer_len, DMA_FROM_DEVICE); if (buffer_info->rxbuf.data) { - e1000_free_frag(buffer_info->rxbuf.data); + skb_free_frag(buffer_info->rxbuf.data); buffer_info->rxbuf.data = NULL; } } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { @@ -3825,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) if (work_done < budget) { if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->flags)) e1000_irq_enable(adapter); } @@ -4594,28 +4589,28 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, data = e1000_alloc_frag(adapter); /* Failed allocation, critical failure */ if (!data) { - e1000_free_frag(olddata); + skb_free_frag(olddata); adapter->alloc_rx_buff_failed++; break; } if (!e1000_check_64k_bound(adapter, data, bufsz)) { /* give up */ - e1000_free_frag(data); - e1000_free_frag(olddata); + skb_free_frag(data); + skb_free_frag(olddata); adapter->alloc_rx_buff_failed++; break; } /* Use new allocation */ - e1000_free_frag(olddata); + skb_free_frag(olddata); } buffer_info->dma = dma_map_single(&pdev->dev, 
data, adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - e1000_free_frag(data); + skb_free_frag(data); buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; break; @@ -4637,7 +4632,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, adapter->rx_buffer_len, DMA_FROM_DEVICE); - e1000_free_frag(data); + skb_free_frag(data); buffer_info->rxbuf.data = NULL; buffer_info->dma = 0; diff --git a/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 08f22f348..2af603f3e 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h index 535a94309..a2162e116 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/82571.c b/kernel/drivers/net/ethernet/intel/e1000e/82571.c index 32e77755a..5f7016442 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/82571.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/82571.h b/kernel/drivers/net/ethernet/intel/e1000e/82571.h index 2e758f796..abc6a9abf 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/82571.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/defines.h b/kernel/drivers/net/ethernet/intel/e1000e/defines.h index 0570c668e..133d4074d 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/defines.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
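A note on the e100.c Tx hunk further up: returning -ENOMEM from the xmit-prepare path without freeing the skb leaks it, since the driver owns the buffer at that point. An annotated restatement of the fixed path (the surrounding driver context, such as struct nic and its PCI handles, is assumed from the diff, not defined here):

```c
/* Annotated restatement of the e100 Tx-map failure fix above. */
dma_addr = pci_map_single(nic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
	/* The driver owns the skb here, so it must be freed before
	 * reporting the failure; dev_kfree_skb_any() is used because
	 * this can run in either process or softirq context. */
	dev_kfree_skb_any(skb);
	skb = NULL;		/* avoid any later use in this function */
	return -ENOMEM;
}
```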
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/e1000.h b/kernel/drivers/net/ethernet/intel/e1000e/e1000.h index 0abc942c9..0b748d195 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -98,6 +98,8 @@ struct e1000_info; #define DEFAULT_RADV 8 #define BURST_RDTR 0x20 #define BURST_RADV 0x20 +#define PCICFG_DESC_RING_STATUS 0xe4 +#define FLUSH_DESC_REQUIRED 0x100 /* in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when @@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define INCVALUE_SHIFT_25MHz 18 #define INCPERIOD_25MHz 1 +#define INCVALUE_24MHz 125 +#define INCVALUE_SHIFT_24MHz 14 +#define INCPERIOD_24MHz 3 + /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. This is dealt with * by simply reading the clock before it overflows. diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c b/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c index 11f486e4f..6cab1f30d 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -648,8 +648,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } static void e1000_get_ringparam(struct net_device *netdev, @@ -1516,8 +1514,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) static int e1000_setup_loopback_test(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; - + u32 rctl, fext_nvm11, tarc0; + + if (hw->mac.type == e1000_pch_spt) { + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + tarc0 &= 0xcfffffff; + /* set bit 29 (value of MULR requests is now 2) */ + tarc0 |= 0x20000000; + ew32(TARC(0), tarc0); + } if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { switch (hw->mac.type) { @@ -1542,7 +1551,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) static void e1000_loopback_cleanup(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; + u32 rctl, fext_nvm11, tarc0; u16 phy_reg; rctl = er32(RCTL); @@ -1550,6 +1559,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) ew32(RCTL, rctl); switch (hw->mac.type) { + case e1000_pch_spt: + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + /* set bit 29 (value of MULR requests is now 0) */ + tarc0 &= 0xcfffffff; + ew32(TARC(0), tarc0); + /* fall through */ case e1000_80003es2lan: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { diff --git a/kernel/drivers/net/ethernet/intel/e1000e/hw.h b/kernel/drivers/net/ethernet/intel/e1000e/hw.h index 19e8c487d..c9da4654e 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/hw.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c index e2498dbf3..91a5a0ae9 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
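The loopback hunks just above program the new MULR behaviour on pch_spt through two magic TARC(0) constants. A small standalone check of what those masks do (values taken straight from the diff; the register semantics follow its comments):

```c
#include <assert.h>
#include <stdint.h>

/* Bits 29:28 of TARC(0) control MULR concurrent requests, per the
 * loopback hunks above. */
#define TARC0_MULR_BITS  ((1u << 28) | (1u << 29))	/* == ~0xcfffffff */

int main(void)
{
	uint32_t tarc0 = 0xffffffffu;	/* arbitrary starting register value */

	tarc0 &= 0xcfffffff;	/* clear bits 28 & 29 */
	tarc0 |= 0x20000000;	/* set bit 29: MULR request count becomes 2 */

	assert((tarc0 & TARC0_MULR_BITS) == (1u << 29));
	return 0;
}
```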
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -237,17 +237,19 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { - /* Unforce SMBus mode in PHY */ - e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + /* Only unforce SMBus if ME is not active */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); - /* Unforce SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } } return true; @@ -1014,8 +1016,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) u16 speed, duplex, scale = 0; u16 max_snoop, max_nosnoop; u16 max_ltr_enc; /* max LTR latency encoded */ - s64 lat_ns; /* latency (ns) */ - s64 value; + u64 value; u32 rxa; if (!hw->adapter->max_frame_size) { @@ -1040,14 +1041,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ - lat_ns = ((s64)rxa * 1024 - - (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; - if (lat_ns < 0) - lat_ns = 0; - else - do_div(lat_ns, speed); + rxa *= 512; + value = (rxa > hw->adapter->max_frame_size) ? + (rxa - hw->adapter->max_frame_size) * (16000 / speed) : + 0; - value = lat_ns; while (value > PCI_LTR_VALUE_MASK) { scale++; value = DIV_ROUND_UP(value, (1 << 5)); @@ -1091,6 +1089,7 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) u32 mac_reg; s32 ret_val = 0; u16 phy_reg; + u16 oem_reg = 0; if ((hw->mac.type < e1000_pch_lpt) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || @@ -1132,33 +1131,37 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + /* Si workaround for ULP entry flow on i127/rev6 h/w. 
Enable * LPLU and disable Gig speed when entering ULP */ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, - &phy_reg); + &oem_reg); if (ret_val) goto release; + + phy_reg = oem_reg; phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, phy_reg); + if (ret_val) goto release; } - /* Force SMBus mode in PHY */ - ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); - if (ret_val) - goto release; - phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; - e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); - - /* Force SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - /* Set Inband ULP Exit, Reset to SMBus mode and * Disable SMBus Release on PERST# in PHY */ @@ -1170,10 +1173,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (to_sx) { if (er32(WUFC) & E1000_WUFC_LNKC) phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + else + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; } else { phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; } e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); @@ -1185,6 +1193,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) /* Commit ULP changes in PHY by starting auto ULP configuration */ phy_reg |= I218_ULP_CONFIG1_START; e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && + to_sx && (er32(STATUS) & E1000_STATUS_LU)) { + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + oem_reg); + if (ret_val) + goto release; + } + release: hw->phy.ops.release(hw); out: @@ -1383,16 +1400,20 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (((hw->mac.type == e1000_pch2lan) || (hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) && link) { - u32 reg; + u16 speed, duplex; - reg = er32(STATUS); + e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); tipg_reg = er32(TIPG); tipg_reg &= ~E1000_TIPG_IPGT_MASK; - if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + if (duplex == HALF_DUPLEX && speed == SPEED_10) { tipg_reg |= 0xFF; /* Reduce Rx latency in analog PHY */ emi_val = 0; + } else if (hw->mac.type == e1000_pch_spt && + duplex == FULL_DUPLEX && speed != SPEED_1000) { + tipg_reg |= 0xC; + emi_val = 1; } else { /* Roll back the default values */ @@ -1416,14 +1437,59 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; + + if (hw->mac.type == e1000_pch_spt) { + u16 data; + u16 ptr_gap; + + if (speed == SPEED_1000) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy_locked(hw, + PHY_REG(776, 20), + &data); + if (ret_val) { + hw->phy.ops.release(hw); + return ret_val; + } + + ptr_gap = (data & (0x3FF << 2)) >> 2; + if (ptr_gap < 0x18) { + data &= ~(0x3FF << 2); + data |= (0x18 << 2); + ret_val = + e1e_wphy_locked(hw, + PHY_REG(776, 20), + data); + } + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } + } + } + + /* I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. 
+ * Set the Beacon Duration for I217 to 8 usec + */ + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + u32 mac_reg; + + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + ew32(FEXTNVM4, mac_reg); } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || - (hw->mac.type == e1000_pch_spt)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h index 770a573b9..34c551e32 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -98,15 +98,22 @@ #define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 /* bit for disabling packet buffer read */ #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 - +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200 +#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 + +/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 #define K1_ENTRY_LATENCY 0 #define K1_MIN_TIME 1 #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ - +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/kernel/drivers/net/ethernet/intel/e1000e/mac.c b/kernel/drivers/net/ethernet/intel/e1000e/mac.c index 30b74d590..e59d7c283 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/mac.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/mac.h b/kernel/drivers/net/ethernet/intel/e1000e/mac.h index 0513d90cd..8284618af 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/mac.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
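The PHY_REG(776, 20) hunk a little above clamps a 10-bit field kept in bits 11:2 of that register to a minimum of 0x18 at gigabit speed. A standalone sketch of the mask arithmetic (the field name follows the diff's local variable; the register semantics are otherwise assumed):

```c
#include <assert.h>
#include <stdint.h>

/* The field lives in bits 11:2, i.e. mask 0x3FF << 2. */
static uint16_t enforce_min_ptr_gap(uint16_t data)
{
	uint16_t ptr_gap = (data & (0x3FF << 2)) >> 2;

	if (ptr_gap < 0x18) {
		data &= ~(0x3FF << 2);	/* clear the field */
		data |= (0x18 << 2);	/* write the minimum value */
	}
	return data;
}

int main(void)
{
	assert((enforce_min_ptr_gap(0x0000) & (0x3FF << 2)) >> 2 == 0x18);
	assert(enforce_min_ptr_gap(0x18 << 2) == 0x18 << 2);	/* unchanged */
	return 0;
}
```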
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/manage.c b/kernel/drivers/net/ethernet/intel/e1000e/manage.c index 06edfca1a..cc9b3befc 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/manage.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/manage.h b/kernel/drivers/net/ethernet/intel/e1000e/manage.h index a8c27f98f..0b9ea5952 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/manage.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/netdev.c b/kernel/drivers/net/ethernet/intel/e1000e/netdev.c index 68913d103..0a854a47d 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,7 +48,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION +#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; adapter->flags2 &= ~FLAG2_IS_DISCARDING; - - writel(0, rx_ring->head); - if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_rdt_wa(rx_ring, 0); - else - writel(0, rx_ring->tail); } static void e1000e_downshift_workaround(struct work_struct *work) @@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - - writel(0, tx_ring->head); - if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_tdt_wa(tx_ring, 0); - else - writel(0, tx_ring->tail); } /** @@ -2705,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) if (work_done < weight) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) ew32(IMS, adapter->rx_ring->ims_val); @@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); + writel(0, tx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); + /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); /* Tx irq moderation */ @@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct 
e1000_adapter *adapter) rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); + writel(0, rx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); + /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); if (adapter->netdev->features & NETIF_F_RXCSUM) @@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - case e1000_pch_spt: - /* On I217, I218 and I219, the clock frequency is 25MHz - * or 96MHz as indicated by the System Clock Frequency - * Indication - */ - if (((hw->mac.type != e1000_pch_lpt) && - (hw->mac.type != e1000_pch_spt)) || - (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; incvalue = INCVALUE_96MHz; shift = INCVALUE_SHIFT_96MHz; adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + } else { + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + } + break; + case e1000_pch_spt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHz; + incvalue = INCVALUE_24MHz; + shift = INCVALUE_SHIFT_24MHz; + adapter->cc.shift = shift; break; } - /* fall-through */ + return -EINVAL; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ @@ -3788,6 +3796,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) } /** + * e1000_flush_tx_ring - remove all descriptors from the tx_ring + * + * We want to clear all pending descriptors from the TX ring. + * zeroing happens when the HW reads the regs. We assign the ring itself as + * the data of the next descriptor. We don't care about the data we are about + * to reset the HW. 
+ */ +static void e1000_flush_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; + u16 size = 512; + + tctl = er32(TCTL); + ew32(TCTL, tctl | E1000_TCTL_EN); + tdt = er32(TDT(0)); + BUG_ON(tdt != tx_ring->next_to_use); + tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); + tx_desc->buffer_addr = tx_ring->dma; + + tx_desc->lower.data = cpu_to_le32(txd_lower | size); + tx_desc->upper.data = 0; + /* flush descriptors to memory before notifying the HW */ + wmb(); + tx_ring->next_to_use++; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + ew32(TDT(0), tx_ring->next_to_use); + mmiowb(); + usleep_range(200, 250); +} + +/** + * e1000_flush_rx_ring - remove all descriptors from the rx_ring + * + * Mark all descriptors in the RX ring as consumed and disable the rx ring + */ +static void e1000_flush_rx_ring(struct e1000_adapter *adapter) +{ + u32 rctl, rxdctl; + struct e1000_hw *hw = &adapter->hw; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + + rxdctl = er32(RXDCTL(0)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + + /* update thresholds: prefetch threshold to 31, host threshold to 1 + * and make sure the granularity is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + + ew32(RXDCTL(0), rxdctl); + /* momentarily enable the RX ring for the changes to take effect */ + ew32(RCTL, rctl | E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + ew32(RCTL, rctl & ~E1000_RCTL_EN); +} + +/** + * e1000_flush_desc_rings - remove all descriptors from the descriptor rings + * + * In i219, the descriptor rings must be emptied before resetting the HW + * or before changing the device state to D3 during runtime (runtime PM). 
+ * + * Failure to do this will cause the HW to enter a unit hang state which can + * only be released by PCI reset on the device + * + */ + +static void e1000_flush_desc_rings(struct e1000_adapter *adapter) +{ + u16 hang_state; + u32 fext_nvm11, tdlen; + struct e1000_hw *hw = &adapter->hw; + + /* First, disable MULR fix in FEXTNVM11 */ + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + /* do nothing if we're not in faulty state, or if the queue is empty */ + tdlen = er32(TDLEN(0)); + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) + return; + e1000_flush_tx_ring(adapter); + /* recheck, maybe the fault is caused by the rx ring */ + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (hang_state & FLUSH_DESC_REQUIRED) + e1000_flush_rx_ring(adapter); +} + +/** * e1000e_reset - bring the hardware into a known good state * * This function boots the hardware and enables some settings that @@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter) } } + if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); @@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter) phy_data &= ~IGP02E1000_PM_SPD; e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); } + if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) { + u32 reg; + + /* Fextnvm7 @ 0xe4[2] = 1 */ + reg = er32(FEXTNVM7); + reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; + ew32(FEXTNVM7, reg); + /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ + reg = er32(FEXTNVM9); + reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | + E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; + ew32(FEXTNVM9, reg); + } + } int e1000e_up(struct e1000_adapter *adapter) @@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) spin_unlock(&adapter->stats64_lock); e1000e_flush_descriptors(adapter); - e1000_clean_tx_ring(adapter->tx_ring); - e1000_clean_rx_ring(adapter->rx_ring); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) e1000_lv_jumbo_workaround_ich8lan(hw, false)) e_dbg("failed to disable jumbo frame workaround mode\n"); - if (reset && !pci_channel_offline(adapter->pdev)) - e1000e_reset(adapter); + if (!pci_channel_offline(adapter->pdev)) { + if (reset) + e1000e_reset(adapter); + else if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); + } + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); } void e1000e_reinit_locked(struct e1000_adapter *adapter) @@ -4150,11 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, cc); struct e1000_hw *hw = &adapter->hw; + u32 systimel_1, systimel_2, systimeh; cycle_t systim, systim_next; - - /* latch SYSTIMH on read of SYSTIML */ - systim = (cycle_t)er32(SYSTIML); - systim |= (cycle_t)er32(SYSTIMH) << 32; + /* SYSTIMH latching upon SYSTIML read does not work well. + * This means that if SYSTIML overflows after we read it but before + * we read SYSTIMH, the value of SYSTIMH has been incremented and we + * will experience a huge non linear increment in the systime value + * to fix that we test for overflow and if true, we re-read systime. 
+ */ + systimel_1 = er32(SYSTIML); + systimeh = er32(SYSTIMH); + systimel_2 = er32(SYSTIML); + /* Check for overflow. If there was no overflow, use the values */ + if (systimel_1 < systimel_2) { + systim = (cycle_t)systimel_1; + systim |= (cycle_t)systimeh << 32; + } else { + /* There was an overflow, read again SYSTIMH, and use + * systimel_2 + */ + systimeh = er32(SYSTIMH); + systim = (cycle_t)systimel_2; + systim |= (cycle_t)systimeh << 32; + } if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { u64 incvalue, time_delta, rem, temp; @@ -4451,6 +4599,7 @@ static int e1000_open(struct net_device *netdev) return 0; err_req_irq: + pm_qos_remove_request(&adapter->pm_qos_req); e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter->rx_ring); @@ -6179,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) return retval; } + /* Ensure that the appropriate bits are set in LPI_CTRL + * for EEE in Sx + */ + if ((hw->phy.type >= e1000_phy_i217) && + adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { + u16 lpi_ctrl = 0; + + retval = hw->phy.ops.acquire(hw); + if (!retval) { + retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, + &lpi_ctrl); + if (!retval) { + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_100_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, + lpi_ctrl); + } + } + hw->phy.ops.release(hw); + } /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. @@ -6217,13 +6393,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) } /** - * e1000e_disable_aspm - Disable ASPM states + * __e1000e_disable_aspm - Disable ASPM states * @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable + * @locked: indication if this context holds pci_bus_sem locked. * * Some devices *must* have certain ASPM states disabled per hardware errata. **/ -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) { struct pci_dev *parent = pdev->bus->self; u16 aspm_dis_mask = 0; @@ -6262,7 +6439,10 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) "L1" : ""); #ifdef CONFIG_PCIEASPM - pci_disable_link_state_locked(pdev, state); + if (locked) + pci_disable_link_state_locked(pdev, state); + else + pci_disable_link_state(pdev, state); /* Double-check ASPM control. If not disabled by the above, the * BIOS is preventing that from happening (or CONFIG_PCIEASPM is @@ -6285,6 +6465,32 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) aspm_dis_mask); } +/** + * e1000e_disable_aspm - Disable ASPM states. + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * This function acquires the pci_bus_sem! + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + __e1000e_disable_aspm(pdev, state, 0); +} + +/** + * e1000e_disable_aspm_locked Disable ASPM states. + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * This function must be called with pci_bus_sem acquired! 
+ * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) +{ + __e1000e_disable_aspm(pdev, state, 1); +} + #ifdef CONFIG_PM static int __e1000_resume(struct pci_dev *pdev) { @@ -6576,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) - e1000e_disable_aspm(pdev, aspm_disable_flag); + e1000e_disable_aspm_locked(pdev, aspm_disable_flag); err = pci_enable_device_mem(pdev); if (err) { @@ -6676,6 +6882,19 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) } } +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) + features &= ~NETIF_F_RXFCS; + + return features; +} + static int e1000_set_features(struct net_device *netdev, netdev_features_t features) { @@ -6732,6 +6951,8 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_poll_controller = e1000_netpoll, #endif .ndo_set_features = e1000_set_features, + .ndo_fix_features = e1000_fix_features, + .ndo_features_check = passthru_features_check, }; /** @@ -7287,7 +7508,7 @@ static int __init e1000_init_module(void) pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/kernel/drivers/net/ethernet/intel/e1000e/nvm.c b/kernel/drivers/net/ethernet/intel/e1000e/nvm.c index fa6b1036a..49f205c02 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/nvm.h b/kernel/drivers/net/ethernet/intel/e1000e/nvm.h index 342bf69ef..5d46967e0 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/param.c b/kernel/drivers/net/ethernet/intel/e1000e/param.c index aa1923f7e..6d8c39abe 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/param.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/param.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
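Returning to the SYSTIML/SYSTIMH change in netdev.c above: the hunk replaces a single latched read with a read-twice-and-retry sequence. A runnable sketch of that logic against a simulated counter (the reg_read_* helpers and the fake counter are inventions of this sketch, standing in for er32()):

```c
#include <stdint.h>
#include <stdio.h>

/* Simulated SYSTIM hardware: a 64-bit counter that advances on each
 * low-word read, positioned just before a low-word wrap to exercise
 * the retry path. In the driver these are er32(SYSTIML)/er32(SYSTIMH). */
static uint64_t fake_counter = 0x00000001fffffffaULL;

static uint32_t reg_read_systiml(void)
{
	fake_counter += 4;
	return (uint32_t)fake_counter;
}

static uint32_t reg_read_systimh(void)
{
	return (uint32_t)(fake_counter >> 32);
}

/* Overflow-safe 64-bit SYSTIM read, mirroring the hunk above. */
static uint64_t read_systim(void)
{
	uint32_t lo1 = reg_read_systiml();
	uint32_t hi  = reg_read_systimh();
	uint32_t lo2 = reg_read_systiml();

	if (lo1 < lo2)	/* no wrap between the two low-word reads */
		return ((uint64_t)hi << 32) | lo1;

	/* the low word wrapped: SYSTIMH has advanced, so re-read it
	 * and pair it with the post-wrap low word */
	hi = reg_read_systimh();
	return ((uint64_t)hi << 32) | lo2;
}

int main(void)
{
	printf("SYSTIM = 0x%llx\n", (unsigned long long)read_systim());
	return 0;	/* prints 0x200000002: the retry path was taken */
}
```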
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/phy.c b/kernel/drivers/net/ethernet/intel/e1000e/phy.c index b2005e13f..de13aeaca 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/phy.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/phy.h b/kernel/drivers/net/ethernet/intel/e1000e/phy.h index 537d2780b..55bfe4735 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/phy.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/ptp.c b/kernel/drivers/net/ethernet/intel/e1000e/ptp.c index 8d7b21dc7..25a0ad510 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/kernel/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/drivers/net/ethernet/intel/e1000e/regs.h b/kernel/drivers/net/ethernet/intel/e1000e/regs.h index 85eefc483..1d5e0b770 100644 --- a/kernel/drivers/net/ethernet/intel/e1000e/regs.h +++ b/kernel/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
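The __e1000e_disable_aspm() rework above is a conventional locked/unlocked wrapper split: one worker takes the locking context as a flag, and two thin entry points document the contract. Condensed to its shape (kernel context assumed; the diff's BIOS double-check is omitted here):

```c
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
{
	if (locked)
		/* caller already holds pci_bus_sem */
		pci_disable_link_state_locked(pdev, state);
	else
		/* acquires pci_bus_sem internally */
		pci_disable_link_state(pdev, state);
}

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	__e1000e_disable_aspm(pdev, state, 0);
}

/* the diff switches e1000_io_slot_reset() over to this variant */
static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
{
	__e1000e_disable_aspm(pdev, state, 1);
}
```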
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,6 +38,8 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ @@ -123,7 +125,6 @@ (0x054E4 + ((_i - 16) * 8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h b/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h index c8c8c5bae..144402004 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -101,12 +101,19 @@ struct fm10k_tx_queue_stats { u64 csum_err; u64 tx_busy; u64 tx_done_old; + u64 csum_good; }; struct fm10k_rx_queue_stats { u64 alloc_failed; u64 csum_err; u64 errors; + u64 csum_good; + u64 switch_errors; + u64 drops; + u64 pp_errors; + u64 link_errors; + u64 length_errors; }; struct fm10k_ring { @@ -251,6 +258,7 @@ struct fm10k_intfc { #define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) #define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) #define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) +#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5) int xcast_mode; /* Tx fast path data */ @@ -277,6 +285,17 @@ struct fm10k_intfc { u64 rx_drops_nic; u64 rx_overrun_pf; u64 rx_overrun_vf; + + /* Debug Statistics */ + u64 hw_sm_mbx_full; + u64 hw_csum_tx_good; + u64 hw_csum_rx_good; + u64 rx_switch_errors; + u64 rx_drops; + u64 rx_pp_errors; + u64 rx_link_errors; + u64 rx_length_errors; + u32 tx_timeout_count; /* RX */ diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index f45b4d71a..5304bc1fb 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) } static void *fm10k_dbg_desc_seq_next(struct seq_file *s, - void __always_unused *v, loff_t *pos) + void __always_unused *v, + loff_t *pos) { struct fm10k_ring *ring = s->private; @@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s, } static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, - __always_unused void *v) + void __always_unused *v) { /* Do nothing. 
*/ } @@ -175,7 +176,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) return; /* Generate a folder for each q_vector */ - sprintf(name, "q_vector.%03d", q_vector->v_idx); + snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx); q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); if (!q_vector->dbg_q_vector) @@ -185,7 +186,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) for (i = 0; i < q_vector->tx.count; i++) { struct fm10k_ring *ring = &q_vector->tx.ring[i]; - sprintf(name, "tx_ring.%03d", ring->queue_index); + snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index); debugfs_create_file(name, 0600, q_vector->dbg_q_vector, ring, @@ -196,7 +197,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) for (i = 0; i < q_vector->rx.count; i++) { struct fm10k_ring *ring = &q_vector->rx.ring[i]; - sprintf(name, "rx_ring.%03d", ring->queue_index); + snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index); debugfs_create_file(name, 0600, q_vector->dbg_q_vector, ring, diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 4b9d9f88a..2ce0eba5e 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -76,19 +76,22 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("mac_rules_used", hw.swapi.mac.used), FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), - FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy), - FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped), - FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages), - FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords), - FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages), - FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords), - FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err), - FM10K_STAT("tx_hang_count", tx_timeout_count), FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; +static const struct fm10k_stats fm10k_gstrings_debug_stats[] = { + FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full), + FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good), + FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good), + FM10K_STAT("rx_switch_errors", rx_switch_errors), + FM10K_STAT("rx_drops", rx_drops), + FM10K_STAT("rx_pp_errors", rx_pp_errors), + FM10K_STAT("rx_link_errors", rx_link_errors), + FM10K_STAT("rx_length_errors", rx_length_errors), +}; + static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("timeout", stats.timeout.count), FM10K_STAT("ur", stats.ur.count), @@ -100,14 +103,33 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), }; +#define FM10K_MBX_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \ + .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { + FM10K_MBX_STAT("mbx_tx_busy", tx_busy), + FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped), + FM10K_MBX_STAT("mbx_tx_messages", tx_messages), + FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords), + FM10K_MBX_STAT("mbx_rx_messages", rx_messages), + FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords), + FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err), +}; + #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) +#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats) #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) +#define 
FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) #define FM10K_QUEUE_STATS_LEN(_n) \ ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ - FM10K_NETDEV_STATS_LEN) + FM10K_NETDEV_STATS_LEN + \ + FM10K_MBX_STATS_LEN) static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = { "Mailbox test (on/offline)" @@ -120,46 +142,97 @@ enum fm10k_self_test_types { FM10K_TEST_MAX = FM10K_TEST_LEN }; -static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) +enum { + FM10K_PRV_FLAG_DEBUG_STATS, + FM10K_PRV_FLAG_LEN, +}; + +static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { + "debug-statistics", +}; + +static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_iov_data *iov_data = interface->iov_data; char *p = (char *)data; - int i; + unsigned int i; + unsigned int j; - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *fm10k_gstrings_test, - FM10K_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_net_stats[i].stat_string, + for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_global_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + if (interface->flags & FM10K_FLAG_DEBUG_STATS) { + for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_debug_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_global_stats[i].stat_string, + } + + for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + if (interface->hw.mac.type != fm10k_mac_vf) { + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + } - if (interface->hw.mac.type != fm10k_mac_vf) - for (i = 0; i < FM10K_PF_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, - ETH_GSTRING_LEN); + if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { + for (i = 0; i < iov_data->num_vfs; i++) { + for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { + snprintf(p, + ETH_GSTRING_LEN, + "vf_%u_%s", i, + fm10k_gstrings_mbx_stats[j].stat_string); p += ETH_GSTRING_LEN; } - - for (i = 0; i < interface->hw.mac.max_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; } + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } +} + +static void fm10k_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + char *p = (char *)data; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *fm10k_gstrings_test, + FM10K_TEST_LEN * ETH_GSTRING_LEN); + break; + 
case ETH_SS_STATS: + fm10k_get_stat_strings(dev, data); + break; + case ETH_SS_PRIV_FLAGS: + memcpy(p, fm10k_prv_flags, + FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN); break; } } @@ -167,6 +240,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) static int fm10k_get_sset_count(struct net_device *dev, int sset) { struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; int stats_len = FM10K_STATIC_STATS_LEN; @@ -179,7 +253,16 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) if (hw->mac.type != fm10k_mac_vf) stats_len += FM10K_PF_STATS_LEN; + if (interface->flags & FM10K_FLAG_DEBUG_STATS) { + stats_len += FM10K_DEBUG_STATS_LEN; + + if (iov_data) + stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs; + } + return stats_len; + case ETH_SS_PRIV_FLAGS: + return FM10K_PRV_FLAG_LEN; default: return -EOPNOTSUPP; } @@ -191,6 +274,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, { const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; struct net_device_stats *net_stats = &netdev->stats; char *p; int i, j; @@ -210,13 +294,47 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - if (interface->hw.mac.type != fm10k_mac_vf) + if (interface->flags & FM10K_FLAG_DEBUG_STATS) { + for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { + p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { + p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + if (interface->hw.mac.type != fm10k_mac_vf) { for (i = 0; i < FM10K_PF_STATS_LEN; i++) { p = (char *)interface + fm10k_gstrings_pf_stats[i].stat_offset; *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + } + + if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { + for (i = 0; i < iov_data->num_vfs; i++) { + struct fm10k_vf_info *vf_info; + vf_info = &iov_data->vf_info[i]; + + /* skip stats if we don't have a vf info */ + if (!vf_info) { + data += FM10K_MBX_STATS_LEN; + continue; + } + + for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { + p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset; + *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + } + } for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; @@ -397,10 +515,6 @@ static void fm10k_get_drvinfo(struct net_device *dev, sizeof(info->version) - 1); strncpy(info->bus_info, pci_name(interface->pdev), sizeof(info->bus_info) - 1); - - info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS); - - info->regdump_len = fm10k_get_regs_len(dev); } static void fm10k_get_pauseparam(struct net_device *dev, @@ -880,6 +994,33 @@ static void fm10k_self_test(struct net_device *dev, eth_test->flags |= ETH_TEST_FL_FAILED; } +static u32 fm10k_get_priv_flags(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + u32 priv_flags = 0; + + if (interface->flags & FM10K_FLAG_DEBUG_STATS) + priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS; + + return priv_flags; +} + +static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN)) + return -EINVAL; + + if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS)) + interface->flags |= FM10K_FLAG_DEBUG_STATS; + else + interface->flags &= ~FM10K_FLAG_DEBUG_STATS; + + return 0; +} + + static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; @@ -1093,6 +1234,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .get_regs = fm10k_get_regs, .get_regs_len = fm10k_get_regs_len, .self_test = fm10k_self_test, + .get_priv_flags = fm10k_get_priv_flags, + .set_priv_flags = fm10k_set_priv_flags, .get_rxfh_indir_size = fm10k_get_reta_size, .get_rxfh_key_size = fm10k_get_rssrk_size, .get_rxfh = fm10k_get_rssh, diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5b08e6284..acfb8b1f8 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -137,8 +137,11 @@ process_mbx: } /* guarantee we have free space in the SM mailbox */ - if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) + if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) { + /* keep track of how many times this occurs */ + interface->hw_sm_mbx_full++; break; + } /* cleanup mailbox and process received messages */ mbx->ops.process(hw, mbx); @@ -228,9 +231,6 @@ int fm10k_iov_resume(struct pci_dev *pdev) hw->iov.ops.set_lport(hw, vf_info, i, FM10K_VF_FLAG_MULTI_CAPABLE); - /* assign our default vid to the VF following reset */ - vf_info->sw_vid = hw->mac.default_vid; - /* mailbox is disconnected so we don't send a message */ hw->iov.ops.assign_default_mac_vlan(hw, vf_info); @@ -400,11 +400,31 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) return num_vfs; } +static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface, + struct fm10k_vf_info *vf_info) +{ + struct fm10k_hw *hw = &interface->hw; + + /* assigning the MAC address will send a mailbox message */ + fm10k_mbx_lock(interface); + + /* disable LPORT for this VF which clears switch rules */ + hw->iov.ops.reset_lport(hw, vf_info); + + /* assign new MAC+VLAN for this VF */ + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + /* re-enable the LPORT for this VF */ + hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx, + FM10K_VF_FLAG_MULTI_CAPABLE); + + fm10k_mbx_unlock(interface); +} + int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac) { struct fm10k_intfc *interface = netdev_priv(netdev); struct 
fm10k_iov_data *iov_data = interface->iov_data; - struct fm10k_hw *hw = &interface->hw; struct fm10k_vf_info *vf_info; /* verify SR-IOV is active and that vf idx is valid */ @@ -419,13 +439,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac) vf_info = &iov_data->vf_info[vf_idx]; ether_addr_copy(vf_info->mac, mac); - /* assigning the MAC will send a mailbox message so lock is needed */ - fm10k_mbx_lock(interface); - - /* assign MAC address to VF */ - hw->iov.ops.assign_default_mac_vlan(hw, vf_info); - - fm10k_mbx_unlock(interface); + fm10k_reset_vf_info(interface, vf_info); return 0; } @@ -455,16 +469,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, /* record default VLAN ID for VF */ vf_info->pf_vid = vid; - /* assigning the VLAN will send a mailbox message so lock is needed */ - fm10k_mbx_lock(interface); - /* Clear the VLAN table for the VF */ hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false); - /* Update VF assignment and trigger reset */ - hw->iov.ops.assign_default_mac_vlan(hw, vf_info); - - fm10k_mbx_unlock(interface); + fm10k_reset_vf_info(interface, vf_info); return 0; } diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c index c9da1b5d4..e76a44cf3 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -269,16 +269,19 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->w.length); #if (PAGE_SIZE < 8192) unsigned int truesize = FM10K_RX_BUFSZ; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif + unsigned int pull_len; - if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; + if (likely(size <= FM10K_RX_HDR_LEN)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as-is */ @@ -290,8 +293,21 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); return fm10k_can_reuse_rx_page(rx_buffer, page, truesize); } @@ -382,6 +398,8 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring, return; skb->ip_summed = CHECKSUM_UNNECESSARY; + + ring->rx_stats.csum_good++; } #define FM10K_RSS_L4_TYPES_MASK \ @@ -481,8 +499,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, if (rx_desc->w.vlan) { u16 vid = le16_to_cpu(rx_desc->w.vlan); - if (vid != rx_ring->vid) + if ((vid & VLAN_VID_MASK) != rx_ring->vid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + else if (vid & VLAN_PRIO_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid & VLAN_PRIO_MASK); } fm10k_type_trans(rx_ring, rx_desc, skb); @@ -518,44 +539,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, } /** - * fm10k_pull_tail - fm10k specific version of skb_pull_tail - * @skb: pointer to current skb being adjusted - * - * This function is an fm10k specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ -static void fm10k_pull_tail(struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
- */ - pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * fm10k_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -575,15 +558,23 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, { if (unlikely((fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_RXE)))) { +#define FM10K_TEST_RXD_BIT(rxd, bit) \ + ((rxd)->w.csum_err & cpu_to_le16(bit)) + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR)) + rx_ring->rx_stats.switch_errors++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR)) + rx_ring->rx_stats.drops++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR)) + rx_ring->rx_stats.pp_errors++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY)) + rx_ring->rx_stats.link_errors++; + if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG)) + rx_ring->rx_stats.length_errors++; dev_kfree_skb_any(skb); rx_ring->rx_stats.errors++; return true; } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - fm10k_pull_tail(skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -602,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } -static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, - struct fm10k_ring *rx_ring, - int budget) +static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *rx_ring, + int budget) { struct sk_buff *skb = rx_ring->skb; unsigned int total_bytes = 0, total_packets = 0; @@ -671,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, q_vector->rx.total_packets += total_packets; q_vector->rx.total_bytes += total_bytes; - return total_packets < budget; + return total_packets; } #define VXLAN_HLEN (sizeof(struct udphdr) + 8) @@ -904,6 +895,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, /* update TX checksum flag */ first->tx_flags |= FM10K_TX_FLAGS_CSUM; + tx_ring->tx_stats.csum_good++; no_csum: /* populate Tx descriptor header size and mss */ @@ -1105,9 +1097,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_tx_buffer *first; int tso; u32 tx_flags = 0; -#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD unsigned short f; -#endif u16 count = TXD_USE_COUNT(skb_headlen(skb)); /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, @@ -1115,12 +1105,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, * + 2 desc gap to keep tail from touching head * otherwise try next time */ -#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); -#else - count += skb_shinfo(skb)->nr_frags; -#endif + if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; @@ -1435,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget) struct fm10k_q_vector *q_vector = container_of(napi, struct fm10k_q_vector, napi); struct fm10k_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; fm10k_for_each_ring(ring, q_vector->tx) @@ -1449,16 
+1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - fm10k_for_each_ring(ring, q_vector->rx) - clean_complete &= fm10k_clean_rx_irq(q_vector, ring, - per_ring_budget); + fm10k_for_each_ring(ring, q_vector->rx) { + int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); + + work_done += work; + clean_complete &= !!(work < per_ring_budget); + } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable the q_vector */ fm10k_qv_enable(q_vector); @@ -1918,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) u32 reta, base; /* If the netdev is initialized we have to maintain table if possible */ - if (interface->netdev->reg_state) { + if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { for (i = FM10K_RETA_SIZE; i--;) { reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) && diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 1b2738380..af09a1b27 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -129,8 +129,8 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) * fm10k_fifo_drop_all - Drop all messages in FIFO * @fifo: pointer to FIFO * - * This function resets the head pointer to drop all messages in the FIFO, - * and ensure the FIFO is empty. + * This function resets the head pointer to drop all messages in the FIFO and + * ensure the FIFO is empty. **/ static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) { @@ -899,6 +899,27 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header + * @mbx: pointer to mailbox + * + * This function creates a fake disconnect header for loading into remote + * mailbox header. The primary purpose is to prevent errors on immediate + * start up after mbx->connect. 
+ **/ +static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** * fm10k_mbx_create_error_msg - Generate an error message * @mbx: pointer to mailbox * @err: local error encountered @@ -1046,9 +1067,26 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, **/ static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) { + u16 len, head, ack; + /* reset our outgoing max size back to Rx limits */ mbx->max_size = mbx->rx.size - 1; + /* update mbx->pulled to account for tail_len and ack */ + head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD); + ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + mbx->pulled += mbx->tail_len - ack; + + /* now drop any messages which have started or finished transmitting */ + while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) { + len = fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + if (mbx->pulled >= len) + mbx->pulled -= len; + else + mbx->pulled = 0; + } + /* just do a quick resync to start of message */ mbx->pushed = 0; mbx->pulled = 0; @@ -1259,16 +1297,11 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) { const u32 *hdr = &mbx->mbx_hdr; - s32 err_no; u16 head; /* we will need to pull all of the fields for verification */ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); - /* we only have lower 10 bits of error number so add upper bits */ - err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); - err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); - switch (mbx->state) { case FM10K_STATE_OPEN: case FM10K_STATE_DISCONNECT: @@ -1423,8 +1456,10 @@ static s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) /* Place mbx in ready to connect state */ mbx->state = FM10K_STATE_CONNECT; + fm10k_mbx_reset_work(mbx); + /* initialize header of remote mailbox */ - fm10k_mbx_create_disconnect_hdr(mbx); + fm10k_mbx_create_fake_disconnect_hdr(mbx); fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr); /* enable interrupt and notify other party of new message */ @@ -1730,7 +1765,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, mbx->state = FM10K_STATE_CLOSED; mbx->remote = 0; fm10k_mbx_reset_work(mbx); - fm10k_mbx_update_max_size(mbx, 0); + fm10k_fifo_drop_all(&mbx->tx); fm10k_write_reg(hw, mbx->mbmem_reg, 0); } diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 2f4f41b7e..7781e8089 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* verify the skb head is not shared */ err = skb_cow_head(skb, 0); - if (err) + if (err) { + dev_kfree_skb(skb); return NETDEV_TX_OK; + } /* locate vlan header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); @@ -758,6 +760,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_hw *hw = &interface->hw; s32 err; + int i; /* updates do not apply to VLAN 0 */ if (!vid) @@ -775,8 +778,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if
(!set) clear_bit(vid, interface->active_vlans); - /* if default VLAN is already present do nothing */ - if (vid == hw->mac.default_vid) + /* disable the default VID on ring if we have an active VLAN */ + for (i = 0; i < interface->num_rx_queues; i++) { + struct fm10k_ring *rx_ring = interface->rx_ring[i]; + u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); + + if (test_bit(rx_vid, interface->active_vlans)) + rx_ring->vid |= FM10K_VLAN_CLEAR; + else + rx_ring->vid &= ~FM10K_VLAN_CLEAR; + } + + /* Do not remove default VID related entries from VLAN and MAC tables */ + if (!set && vid == hw->mac.default_vid) + return 0; + + /* Do not throw an error if the interface is down. We will sync once + * we come up + */ + if (test_bit(__FM10K_DOWN, &interface->state)) return 0; fm10k_mbx_lock(interface); @@ -923,18 +943,12 @@ static int __fm10k_mc_sync(struct net_device *dev, struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_hw *hw = &interface->hw; u16 vid, glort = interface->glort; - s32 err; - - if (!is_multicast_ether_addr(addr)) - return -EADDRNOTAVAIL; /* update table with current entries */ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { - err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); - if (err) - return err; + hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); } return 0; @@ -1002,21 +1016,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) int xcast_mode; u16 vid, glort; - /* restore our address if perm_addr is set */ - if (hw->mac.type == fm10k_mac_vf) { - if (is_valid_ether_addr(hw->mac.perm_addr)) { - ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); - ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; - } - - if (hw->mac.vlan_override) - netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - else - netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; - } - /* record glort for this interface */ glort = interface->glort; @@ -1051,7 +1050,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid, true, 0); } - /* update xcast mode before syncronizing addresses */ + /* update xcast mode before synchronizing addresses */ hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); /* synchronize all of the addresses */ @@ -1339,8 +1338,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); dglort.glort = interface->glort; - if (l2_accel) - dglort.shared_l = fls(l2_accel->size); + dglort.shared_l = fls(l2_accel->size); hw->mac.ops.configure_dglort_map(hw, &dglort); /* If table is empty remove it */ diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index df9fda38b..74be792f3 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface) /* reassociate interrupts */ fm10k_mbx_request_irq(interface); + /* update hardware address for VFs if perm_addr has changed */ + if (hw->mac.type == fm10k_mac_vf) { + if (is_valid_ether_addr(hw->mac.perm_addr)) { + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); + ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + 
netdev->addr_assign_type &= ~NET_ADDR_RANDOM; + } + + if (hw->mac.vlan_override) + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + } + /* reset clock */ fm10k_ts_reset(interface); @@ -259,8 +274,6 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) * @interface: board private structure * * This function will process both the upstream and downstream mailboxes. - * It is necessary for us to hold the rtnl_lock while doing this as the - * mailbox accesses are protected by this lock. **/ static void fm10k_mbx_subtask(struct fm10k_intfc *interface) { @@ -315,6 +328,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface) { struct net_device_stats *net_stats = &interface->netdev->stats; struct fm10k_hw *hw = &interface->hw; + u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0; + u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0; + u64 rx_link_errors = 0; u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0; u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0; u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0; @@ -334,6 +350,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) tx_csum_errors += tx_ring->tx_stats.csum_err; bytes += tx_ring->stats.bytes; pkts += tx_ring->stats.packets; + hw_csum_tx_good += tx_ring->tx_stats.csum_good; } interface->restart_queue = restart_queue; @@ -341,6 +358,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface) net_stats->tx_bytes = bytes; net_stats->tx_packets = pkts; interface->tx_csum_errors = tx_csum_errors; + interface->hw_csum_tx_good = hw_csum_tx_good; + /* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { struct fm10k_ring *rx_ring = interface->rx_ring[i]; @@ -350,12 +369,24 @@ void fm10k_update_stats(struct fm10k_intfc *interface) alloc_failed += rx_ring->rx_stats.alloc_failed; rx_csum_errors += rx_ring->rx_stats.csum_err; rx_errors += rx_ring->rx_stats.errors; + hw_csum_rx_good += rx_ring->rx_stats.csum_good; + rx_switch_errors += rx_ring->rx_stats.switch_errors; + rx_drops += rx_ring->rx_stats.drops; + rx_pp_errors += rx_ring->rx_stats.pp_errors; + rx_link_errors += rx_ring->rx_stats.link_errors; + rx_length_errors += rx_ring->rx_stats.length_errors; } net_stats->rx_bytes = bytes; net_stats->rx_packets = pkts; interface->alloc_failed = alloc_failed; interface->rx_csum_errors = rx_csum_errors; + interface->hw_csum_rx_good = hw_csum_rx_good; + interface->rx_switch_errors = rx_switch_errors; + interface->rx_drops = rx_drops; + interface->rx_pp_errors = rx_pp_errors; + interface->rx_link_errors = rx_link_errors; + interface->rx_length_errors = rx_length_errors; hw->mac.ops.update_hw_stats(hw, &interface->stats); @@ -483,7 +514,7 @@ static void fm10k_service_task(struct work_struct *work) interface = container_of(work, struct fm10k_intfc, service_task); - /* tasks always capable of running, but must be rtnl protected */ + /* tasks run even when interface is down */ fm10k_mbx_subtask(interface); fm10k_detach_subtask(interface); fm10k_reset_subtask(interface); @@ -663,6 +694,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* assign default VLAN to queue */ ring->vid = hw->mac.default_vid; + /* if we have an active VLAN, disable default VID */ + if (test_bit(hw->mac.default_vid, interface->active_vlans)) + ring->vid |= FM10K_VLAN_CLEAR; + /* Map interrupt */ if (ring->q_vector) { rxint = ring->q_vector->v_idx + 
NON_Q_VECTORS(hw); @@ -861,10 +896,12 @@ void fm10k_netpoll(struct net_device *netdev) #endif #define FM10K_ERR_MSG(type) case (type): error = #type; break -static void fm10k_print_fault(struct fm10k_intfc *interface, int type, +static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, struct fm10k_fault *fault) { struct pci_dev *pdev = interface->pdev; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_iov_data *iov_data = interface->iov_data; char *error; switch (type) { @@ -918,6 +955,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, int type, "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n", error, fault->address, fault->specinfo, PCI_SLOT(fault->func), PCI_FUNC(fault->func)); + + /* For VF faults, clear out the respective LPORT, reset the queue + * resources, and then reconnect to the mailbox. This allows the + * VF in question to resume behavior. For transient faults that are + * the result of non-malicious behavior this will log the fault and + * allow the VF to resume functionality. Obviously for malicious VFs + * they will be able to attempt malicious behavior again. In this + * case, the system administrator will need to step in and manually + * remove or disable the VF in question. + */ + if (fault->func && iov_data) { + int vf = fault->func - 1; + struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf]; + + hw->iov.ops.reset_lport(hw, vf_info); + hw->iov.ops.reset_resources(hw, vf_info); + + /* reset_lport disables the VF, so re-enable it */ + hw->iov.ops.set_lport(hw, vf_info, vf, + FM10K_VF_FLAG_MULTI_CAPABLE); + + /* reset_resources will disconnect from the mbx */ + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } } static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) @@ -941,7 +1002,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) continue; } - fm10k_print_fault(interface, type, &fault); + fm10k_handle_fault(interface, type, &fault); } } @@ -1559,6 +1620,7 @@ void fm10k_down(struct fm10k_intfc *interface) /* free any buffers still on the rings */ fm10k_clean_all_tx_rings(interface); + fm10k_clean_all_rx_rings(interface); } /** @@ -1704,22 +1766,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, static void fm10k_slot_warn(struct fm10k_intfc *interface) { - struct device *dev = &interface->pdev->dev; + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; struct fm10k_hw *hw = &interface->hw; + int max_gts = 0, expected_gts = 0; + + if (pcie_get_minimum_link(interface->pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + dev_warn(&interface->pdev->dev, + "Unable to determine PCI Express bandwidth.\n"); + return; + } - if (hw->mac.ops.is_slot_appropriate(hw)) + switch (speed) { + case PCIE_SPEED_2_5GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 2 * width; + break; + case PCIE_SPEED_5_0GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 4 * width; + break; + case PCIE_SPEED_8_0GT: + /* 128b/130b encoding has less than 2% impact on throughput */ + max_gts = 8 * width; + break; + default: + dev_warn(&interface->pdev->dev, + "Unable to determine PCI Express bandwidth.\n"); + return; + } + + dev_info(&interface->pdev->dev, + "PCI Express bandwidth of %dGT/s available\n", + max_gts); + dev_info(&interface->pdev->dev, + "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n", + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + speed == PCIE_SPEED_5_0GT ? 
"5.0GT/s" : + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + "Unknown"), + hw->bus.width, + (speed == PCIE_SPEED_2_5GT ? "20%" : + speed == PCIE_SPEED_5_0GT ? "20%" : + speed == PCIE_SPEED_8_0GT ? "<2%" : + "Unknown"), + (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : + hw->bus.payload == fm10k_bus_payload_256 ? "256B" : + hw->bus.payload == fm10k_bus_payload_512 ? "512B" : + "Unknown")); + + switch (hw->bus_caps.speed) { + case fm10k_bus_speed_2500: + /* 8b/10b encoding reduces max throughput by 20% */ + expected_gts = 2 * hw->bus_caps.width; + break; + case fm10k_bus_speed_5000: + /* 8b/10b encoding reduces max throughput by 20% */ + expected_gts = 4 * hw->bus_caps.width; + break; + case fm10k_bus_speed_8000: + /* 128b/130b encoding has less than 2% impact on throughput */ + expected_gts = 8 * hw->bus_caps.width; + break; + default: + dev_warn(&interface->pdev->dev, + "Unable to determine expected PCI Express bandwidth.\n"); return; + } - dev_warn(dev, - "For optimal performance, a %s %s slot is recommended.\n", - (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" : - hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" : - "x8"), - (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : - hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : - "8.0GT/s")); - dev_warn(dev, - "A slot with more lanes and/or higher speed is suggested.\n"); + if (max_gts < expected_gts) { + dev_warn(&interface->pdev->dev, + "This device requires %dGT/s of bandwidth for optimal performance.\n", + expected_gts); + dev_warn(&interface->pdev->dev, + "A %sslot with x%d lanes is suggested.\n", + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), + hw->bus_caps.width); + } } /** @@ -1738,32 +1864,19 @@ static int fm10k_probe(struct pci_dev *pdev, { struct net_device *netdev; struct fm10k_intfc *interface; - struct fm10k_hw *hw; int err; - u64 dma_mask; err = pci_enable_device_mem(pdev); if (err) return err; - /* By default fm10k only supports a 48 bit DMA mask */ - dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev); - - if ((dma_mask <= DMA_BIT_MASK(32)) || - dma_set_mask_and_coherent(&pdev->dev, dma_mask)) { - dma_mask &= DMA_BIT_MASK(32); - + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } - } + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: %d\n", err); + goto err_dma; } err = pci_request_selected_regions(pdev, @@ -1772,7 +1885,7 @@ static int fm10k_probe(struct pci_dev *pdev, fm10k_driver_name); if (err) { dev_err(&pdev->dev, - "pci_request_selected_regions failed 0x%x\n", err); + "pci_request_selected_regions failed: %d\n", err); goto err_pci_reg; } @@ -1794,7 +1907,6 @@ static int fm10k_probe(struct pci_dev *pdev, interface->netdev = netdev; interface->pdev = pdev; - hw = &interface->hw; interface->uc_addr = ioremap(pci_resource_start(pdev, 0), FM10K_UC_ADDR_SIZE); @@ -1836,24 +1948,12 @@ static int fm10k_probe(struct pci_dev *pdev, /* Register PTP interface */ fm10k_ptp_register(interface); - /* print bus type/speed/width info */ - dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n", - (hw->bus.speed == 
fm10k_bus_speed_8000 ? "8.0GT/s" : - hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : - hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : - "Unknown"), - (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" : - hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" : - hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" : - "Unknown"), - (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : - hw->bus.payload == fm10k_bus_payload_256 ? "256B" : - hw->bus.payload == fm10k_bus_payload_512 ? "512B" : - "Unknown")); - /* print warning for non-optimal configurations */ fm10k_slot_warn(interface); + /* report MAC address for logging */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ fm10k_iov_configure(pdev, 0); @@ -1994,6 +2094,16 @@ static int fm10k_resume(struct pci_dev *pdev) if (err) return err; + /* assume host is not ready, to prevent race with watchdog in case we + * actually don't have connection to the switch + */ + interface->host_ready = false; + fm10k_watchdog_host_not_ready(interface); + + /* clear the service task disable bit to allow service task to start */ + clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + fm10k_service_event_schedule(interface); + /* restore SR-IOV interface */ fm10k_iov_resume(pdev); @@ -2021,6 +2131,15 @@ static int fm10k_suspend(struct pci_dev *pdev, fm10k_iov_suspend(pdev); + /* the watchdog tasks may read registers, which will appear like a + * surprise-remove event once the PCI device is disabled. This will + * cause us to close the netdevice, so we don't retain the open/closed + * state post-resume. Prevent this by disabling the service task while + * suspended, until we actually resume. + */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + cancel_work_sync(&interface->service_task); + rtnl_lock(); if (netif_running(netdev)) diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 891e21874..8c0bdc4e4 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) return FM10K_ERR_DMA_PENDING; + /* verify the switch is ready for reset */ + reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2); + if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY)) + goto out; + /* Initiate data path reset */ reg |= FM10K_DMA_CTRL_DATAPATH_RESET; fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); @@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) if (!(reg & FM10K_IP_NOTINRESET)) err = FM10K_ERR_RESET_FAILED; +out: return err; } @@ -185,19 +191,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) } /** - * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU - * @hw: pointer to hardware structure - * - * Looks at the PCIe bus info to confirm whether or not this slot can support - * the necessary bandwidth for this device.
- **/ -static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw) -{ - return (hw->bus.speed == hw->bus_caps.speed) && - (hw->bus.width == hw->bus_caps.width); -} - -/** * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table * @hw: pointer to hardware structure * @vid: VLAN ID to add to table @@ -1046,6 +1039,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); } + /* repeat the first ring for all the remaining VF rings */ + for (i = queues_per_pool; i < qmap_stride; i++) { + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx); + } + return 0; } @@ -1156,6 +1155,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, } /** + * fm10k_iov_select_vid - Select correct default VID + * @hw: Pointer to hardware structure + * @vid: VID to correct + * + * Will report an error if VID is out of range. For VID = 0, it will return + * either the pf_vid or sw_vid depending on which one is set. + */ +static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +{ + if (!vid) + return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid; + else if (vf_info->pf_vid && vid != vf_info->pf_vid) + return FM10K_ERR_PARAM; + else + return vid; +} + +/** * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF * @hw: Pointer to hardware structure * @results: Pointer array to message, results[0] is pointer to message @@ -1169,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - int err = 0; u8 mac[ETH_ALEN]; u32 *result; + int err = 0; + bool set; u16 vlan; u32 vid; @@ -1187,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, if (err) return err; - /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ - if (!vid || (vid == FM10K_VLAN_CLEAR)) { - if (vf_info->pf_vid) - vid |= vf_info->pf_vid; - else - vid |= vf_info->sw_vid; - } else if (vid != vf_info->pf_vid) { + /* verify upper 16 bits are zero */ + if (vid >> 16) return FM10K_ERR_PARAM; - } + + set = !(vid & FM10K_VLAN_CLEAR); + vid &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vid); + if (err < 0) + return err; + else + vid = err; /* update VSI info for VF in regards to VLAN table */ - err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, - !(vid & FM10K_VLAN_CLEAR)); + err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); } if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { @@ -1215,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, memcmp(mac, vf_info->mac, ETH_ALEN)) return FM10K_ERR_PARAM; - /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ - if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { - if (vf_info->pf_vid) - vlan |= vf_info->pf_vid; - else - vlan |= vf_info->sw_vid; - } else if (vf_info->pf_vid) { - return FM10K_ERR_PARAM; - } + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + else + vlan = err; /* notify switch of request for new unicast address */ - err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan, - !(vlan & FM10K_VLAN_CLEAR), 0); + err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, + mac, vlan, set, 0); } if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { @@ -1242,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, 
u32 **results, if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) return FM10K_ERR_PARAM; - /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ - if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { - if (vf_info->pf_vid) - vlan |= vf_info->pf_vid; - else - vlan |= vf_info->sw_vid; - } else if (vf_info->pf_vid) { - return FM10K_ERR_PARAM; - } + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + else + vlan = err; /* notify switch of request for new multicast address */ - err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, - !(vlan & FM10K_VLAN_CLEAR)); + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, + mac, vlan, set); } return err; @@ -1345,6 +1363,14 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); + /* we need to clear VF_FLAG_ENABLED flags in order to ensure + * that we actually re-enable the LPORT state below. Note that + * this has no impact if the VF is already disabled, as the + * flags are already cleared. + */ + if (!err) + vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info); + /* when enabling the port we should reset the rate limiters */ hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate); @@ -1786,8 +1812,8 @@ static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb) if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK) return FM10K_ERR_PARAM; - if (ppb < 0) - systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE; + if (ppb > 0) + systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE; fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust); @@ -1835,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = { .init_hw = &fm10k_init_hw_pf, .start_hw = &fm10k_start_hw_generic, .stop_hw = &fm10k_stop_hw_generic, - .is_slot_appropriate = &fm10k_is_slot_appropriate_pf, .update_vlan = &fm10k_update_vlan_pf, .read_mac_addr = &fm10k_read_mac_addr_pf, .update_uc_addr = &fm10k_update_uc_addr_pf, diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 7ab1db4ff..40a0dbc62 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -81,26 +81,26 @@ struct fm10k_mac_update { __le16 glort; u8 flags; u8 action; -}; +} __packed; struct fm10k_global_table_data { __le32 used; __le32 avail; -}; +} __packed; struct fm10k_swapi_error { __le32 status; struct fm10k_global_table_data mac; struct fm10k_global_table_data nexthop; struct fm10k_global_table_data ffu; -}; +} __packed; struct fm10k_swapi_1588_timestamp { __le64 egress; __le64 ingress; __le16 dglort; __le16 sglort; -}; +} __packed; s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c index 9043633c3..b4945e8ab 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c @@ -70,16 +70,16 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb) * if none are present then insert skb in tail of list */ skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort); - if (!skb) + if (!skb) { + skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; __skb_queue_tail(list, clone); + } spin_unlock_irqrestore(&list->lock, 
flags); /* if list already has one then we just free the clone */ if (skb) - kfree_skb(skb); - else - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; + dev_kfree_skb(clone); } void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, @@ -103,9 +103,10 @@ void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, if (!skb) return; - /* timestamp the sk_buff and return it to the socket */ + /* timestamp the sk_buff and free our copy */ fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime); - skb_complete_tx_timestamp(skb, &shhwtstamps); + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); } void fm10k_ts_tx_subtask(struct fm10k_intfc *interface) diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 4af96686c..318a212f0 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -369,7 +369,7 @@ struct fm10k_hw; /* Registers contained in BAR 4 for Switch management */ #define FM10K_SW_SYSTIME_ADJUST 0x0224D #define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF -#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000 +#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000 #define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) enum fm10k_int_source { @@ -521,7 +521,6 @@ struct fm10k_mac_ops { s32 (*stop_hw)(struct fm10k_hw *); s32 (*get_bus_info)(struct fm10k_hw *); s32 (*get_host_state)(struct fm10k_hw *, bool *); - bool (*is_slot_appropriate)(struct fm10k_hw *); s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); s32 (*read_mac_addr)(struct fm10k_hw *); s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, @@ -763,6 +762,12 @@ enum fm10k_rxdesc_xc { #define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */ #define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */ +#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */ +#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */ +#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */ +#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */ +#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */ + struct fm10k_ftag { __be16 swpri_type_user; __be16 vlan; diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 94f0f6a14..36c8b0aa0 100644 --- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) return 0; } -/** - * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU - * @hw: pointer to hardware structure - * - * Looks at the PCIe bus info to confirm whether or not this slot can support - * the necessary bandwidth for this device. Since the VF has no control over - * the "slot" it is in, always indicate that the slot is appropriate.
- **/ -static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) -{ - return true; -} - /* This structure defines the attributes to be parsed below */ const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), @@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = { .init_hw = &fm10k_init_hw_vf, .start_hw = &fm10k_start_hw_generic, .stop_hw = &fm10k_stop_hw_vf, - .is_slot_appropriate = &fm10k_is_slot_appropriate_vf, .update_vlan = &fm10k_update_vlan_vf, .read_mac_addr = &fm10k_read_mac_addr_vf, .update_uc_addr = &fm10k_update_uc_addr_vf, diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e.h b/kernel/drivers/net/ethernet/intel/i40e/i40e.h index 5d4730712..4dd3e2612 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e.h @@ -71,7 +71,6 @@ #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 -#define I40E_MAX_REGISTER 0x800000 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 @@ -79,10 +78,13 @@ #define I40E_MIN_MSIX 2 #define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ #define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ -#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ +/* max 16 qps */ +#define i40e_default_queues_per_vmdq(pf) \ + (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ -#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ +#define i40e_pf_get_max_q_per_tc(pf) \ + (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64) #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #ifdef I40E_FCOE @@ -91,19 +93,26 @@ #endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 -#define I40E_AQ_WORK_LIMIT 32 +#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 -#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) +#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) +#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) +#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_FD_ATR BIT(2) +#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 #define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +#define I40E_OEM_VER_BUILD_MASK 0xffff +#define I40E_OEM_VER_PATCH_MASK 0xff +#define I40E_OEM_VER_BUILD_SHIFT 8 +#define I40E_OEM_VER_SHIFT 24 /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -182,6 +191,7 @@ struct i40e_lump_tracking { enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, I40E_FD_STAT_SB, + I40E_FD_STAT_ATR_TUNNEL, I40E_FD_STAT_PF_COUNT }; #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT) @@ -189,6 +199,8 @@ enum i40e_fd_stat_idx { (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR) #define I40E_FD_SB_STAT_IDX(pf_id) \ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB) +#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL) struct i40e_fdir_filter { struct hlist_node fdir_node; @@ -237,7 +249,6 @@ struct i40e_pf { struct pci_dev *pdev; struct i40e_hw hw; unsigned long state; -
unsigned long link_check_timeout; struct msix_entry *msix_entries; bool fc_autoneg_status; @@ -263,8 +274,6 @@ struct i40e_pf { struct hlist_head fdir_filter_list; u16 fdir_pf_active_filters; - u16 fd_sb_cnt_idx; - u16 fd_atr_cnt_idx; unsigned long fd_flush_timestamp; u32 fd_flush_cnt; u32 fd_add_err; @@ -288,36 +297,45 @@ struct i40e_pf { struct work_struct service_task; u64 flags; -#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1) -#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2) -#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3) -#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) -#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) -#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) -#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) -#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) -#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) +#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) +#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) +#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) +#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4) +#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5) +#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) +#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) +#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9) +#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) #ifdef I40E_FCOE -#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11) +#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) -#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) -#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) -#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) -#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) -#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) -#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) -#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) -#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) -#define I40E_FLAG_PTP (u64)(1 << 25) -#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) +#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) +#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) +#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17) +#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18) +#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) +#define I40E_FLAG_DCB_ENABLED BIT_ULL(20) +#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21) +#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) +#define I40E_FLAG_PTP BIT_ULL(25) +#define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #ifdef CONFIG_I40E_VXLAN -#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) +#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27) #endif -#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) -#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) +#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) +#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) +#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32) +#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33) +#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34) +#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35) +#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37) +#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) +#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) +#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -361,6 +379,7 @@ struct i40e_pf { #ifdef CONFIG_DEBUG_FS struct dentry *i40e_dbg_pf; #endif /* CONFIG_DEBUG_FS */ + bool cur_promisc; u16 instance; /* A unique number per i40e_pf 
instance in the system */ @@ -397,6 +416,9 @@ struct i40e_pf { /* These are only valid in NPAR modes */ u32 npar_max_bw; u32 npar_min_bw; + + u32 ioremap_len; + u32 fd_inv; }; struct i40e_mac_filter { @@ -431,6 +453,8 @@ struct i40e_veb { bool stat_offsets_loaded; struct i40e_eth_stats stats; struct i40e_eth_stats stats_offsets; + struct i40e_veb_tc_stats tc_stats; + struct i40e_veb_tc_stats tc_stats_offsets; }; /* struct that defines a VSI, associated with a dev */ @@ -442,10 +466,12 @@ struct i40e_vsi { u32 current_netdev_flags; unsigned long state; -#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0) -#define I40E_VSI_FLAG_VEB_OWNER (1<<1) +#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0) +#define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; + /* Per VSI lock to protect elements/list (MAC filter) */ + spinlock_t mac_filter_list_lock; struct list_head mac_filter_list; /* VSI stats */ @@ -460,6 +486,7 @@ struct i40e_vsi { #endif u32 tx_restart; u32 tx_busy; + u64 tx_linearize; u32 rx_buf_failed; u32 rx_page_failed; @@ -475,6 +502,7 @@ struct i40e_vsi { */ u16 rx_itr_setting; u16 tx_itr_setting; + u16 int_rate_limit; /* value in usecs */ u16 rss_table_size; u16 rss_size; @@ -520,6 +548,7 @@ struct i40e_vsi { u16 idx; /* index in pf->vsi[] */ u16 veb_idx; /* index of VEB parent */ struct kobject *kobj; /* sysfs object */ + bool current_isup; /* Sync 'link up' logging */ /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); @@ -549,6 +578,9 @@ struct i40e_q_vector { cpumask_t affinity_mask; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; + bool arm_wb_state; +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 should adjust ITR */ } ____cacheline_internodealigned_in_smp; /* lan device */ @@ -558,22 +590,29 @@ struct i40e_device { }; /** - * i40e_fw_version_str - format the FW and NVM version strings + * i40e_nvm_version_str - format the NVM version strings * @hw: ptr to the hardware info **/ -static inline char *i40e_fw_version_str(struct i40e_hw *hw) +static inline char *i40e_nvm_version_str(struct i40e_hw *hw) { static char buf[32]; + u32 full_ver; + u8 ver, patch; + u16 build; + + full_ver = hw->nvm.oem_ver; + ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) + & I40E_OEM_VER_BUILD_MASK); + patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), - "f%d.%d.%05d a%d.%d n%x.%02x e%x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, - hw->aq.api_maj_ver, hw->aq.api_min_ver, + "%x.%02x 0x%x %d.%d.%d", (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - (hw->nvm.eetrack & 0xffffff)); + hw->nvm.eetrack, ver, build, patch); return buf; } @@ -652,7 +691,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev); void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev); -int i40e_sync_vsi_filters(struct i40e_vsi *vsi); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); @@ -685,7 +724,24 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {} static inline void i40e_dbg_init(void) {} static inline void i40e_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS*/ -void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +/** + * 
i40e_irq_dynamic_enable - Enable default interrupt generation settings + * @vsi: pointer to a vsi + * @vector: enable a particular Hw Interrupt vector, without base_vector + **/ +static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val); + /* skip the flush */ +} + void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); @@ -724,7 +780,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt); u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf); void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi); void i40e_fcoe_vsi_setup(struct i40e_pf *pf); -int i40e_init_pf_fcoe(struct i40e_pf *pf); +void i40e_init_pf_fcoe(struct i40e_pf *pf); int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi); void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi); int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, @@ -756,4 +812,5 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); #endif /* _I40E_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037..1fd5ea82a 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! 
*/ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -482,8 +482,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.asq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.asq.head, 0); @@ -492,16 +496,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) wr32(hw, hw->aq.asq.bal, 0); wr32(hw, hw->aq.asq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.asq_mutex); - hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_asq_bufs(hw); +shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); - return ret_code; } @@ -515,8 +516,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.arq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.arq_mutex); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.arq.head, 0); @@ -525,16 +530,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.arq_mutex); - hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_arq_bufs(hw); +shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); - return ret_code; } @@ -551,8 +553,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) **/ i40e_status i40e_init_adminq(struct i40e_hw *hw) { - i40e_status ret_code; + u16 cfg_ptr, oem_hi, oem_lo; u16 eetrack_lo, eetrack_hi; + i40e_status ret_code; int retry = 0; /* verify input for valid configuration */ @@ -564,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* initialize locks */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -611,6 +610,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); + i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), + &oem_hi); + i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), + &oem_lo); + hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; @@ -655,7 +660,8 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - /* destroy the locks */ + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); return ret_code; } @@ -678,8 +684,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "%s: ntc %d head %d.\n", __func__, ntc, - rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = @@ -742,19 +747,23 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, u16 retval = 0; u32 val = 0; - val = rd32(hw, hw->aq.asq.head); - if (val >= hw->aq.num_asq_entries) { + mutex_lock(&hw->aq.asq_mutex); + + if 
(hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: head overrun at %d\n", val); + "AQTX: Admin queue not initialized.\n"); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } - if (hw->aq.asq.count == 0) { + hw->aq.asq_last_status = I40E_AQ_RC_OK; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: Admin queue not initialized.\n"); + "AQTX: head overrun at %d\n", val); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); @@ -779,8 +788,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, desc->flags &= ~cpu_to_le16(details->flags_dis); desc->flags |= cpu_to_le16(details->flags_ena); - mutex_lock(&hw->aq.asq_mutex); - if (buff_size > hw->aq.asq_buf_size) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -889,6 +896,10 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, "AQTX: desc and buffer writeback:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + /* save writeback aq if requested */ + if (details->wb_desc) + *details->wb_desc = *desc_on_ring; + /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { @@ -900,7 +911,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); -asq_send_command_exit: return status; } @@ -946,6 +956,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { @@ -1007,6 +1024,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { @@ -1014,6 +1033,19 @@ clean_arq_element_out: i40e_release_nvm(hw); hw->aq.nvm_release_on_done = false; } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } } return ret_code; diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 28e519a50..12fbbddea 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -69,6 +69,7 @@ struct i40e_asq_cmd_details { u16 flags_dis; bool async; bool postpone; + struct i40e_aq_desc *wb_desc; }; #define I40E_ADMINQ_DETAILS(R, i) \ @@ -108,9 +109,10 @@ struct i40e_adminq_info { /** * i40e_aq_rc_to_posix - convert errors to user-land codes - * aq_rc: AdminQ error code to convert + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert **/ -static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) return -EAGAIN; - if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; + return aq_to_posix[aq_rc]; } diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 929e3d72a..6584b6cd7 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MINOR 0x0004 struct i40e_aq_desc { __le16 flags; @@ -132,12 +132,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -262,7 +257,10 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -274,8 +272,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -509,7 +505,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 
0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -532,7 +529,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -826,8 +825,12 @@ struct i40e_aqc_vsi_properties_data { I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; @@ -1068,6 +1071,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -1718,11 +1722,13 @@ struct i40e_aqc_get_link_status { u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; -#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; @@ -2058,12 +2064,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) #define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 #define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) + #define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 #define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) #define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) +#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB +#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) +#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) + +/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with + * word boundary layout issues, which the Linux compilers silently deal + * with by adding padding, making the actual struct larger than designed. + * However, the FW compiler for the NIC is less lenient and complains + * about the struct. Hence, the struct defined here has an extra byte in + * fields reserved3 and reserved4 to directly acknowledge that padding, + * and the new length is used in the length check macro. 
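The length-check macro referred to here turns any future size drift into a build failure rather than a silent wire-format mismatch. The driver's I40E_CHECK_STRUCT_LEN is spelled differently, but a minimal sketch of the idiom is a constant expression that divides by zero whenever the size is wrong:

	#define CHECK_STRUCT_LEN(n, X) \
		enum { check_len_##X = 1 / (sizeof(struct X) == (n)) }

	CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);	/* 0x18 matches the field widths shown below */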
+ */ struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; @@ -2071,9 +2093,9 @@ struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved2; u8 oper_tc_bw[8]; u8 oper_pfc_en; - u8 reserved3; + u8 reserved3[2]; __le16 oper_app_prio; - u8 reserved4; + u8 reserved4[2]; __le16 tlv_status; }; @@ -2110,6 +2132,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 #define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; @@ -2177,6 +2206,46 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + /* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure { diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c index 0bae22da0..2d74c6e4d 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -51,9 +51,20 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_20G_KR2: + case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: + hw->mac.type = I40E_MAC_X722_VF; + break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; @@ -72,6 +83,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) } /** + * i40e_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case 
I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40e_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return 
"I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + +/** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask @@ -114,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < 
len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -177,6 +380,164 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40e_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags 
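/* Both RSS helpers share one shape: a static routine that picks the get or
 * set opcode from a bool, plus two thin public wrappers, so the descriptor
 * setup is written exactly once. A generic sketch of the pattern with
 * hypothetical names:
 *
 *	static int rss_cmd(struct hw *hw, void *buf, u16 len, bool set)
 *	{
 *		u16 opcode = set ? OPC_SET_RSS : OPC_GET_RSS;
 *
 *		return send_indirect_cmd(hw, opcode, buf, len);
 *	}
 *
 *	int rss_get(struct hw *hw, void *buf, u16 len)
 *	{
 *		return rss_cmd(hw, buf, len, false);
 *	}
 *
 *	int rss_set(struct hw *hw, void *buf, u16 len)
 *	{
 *		return rss_cmd(hw, buf, len, true);
 *	}
 */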
|= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + + status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40e_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. @@ -563,6 +924,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) switch (hw->mac.type) { case I40E_MAC_XL710: + case I40E_MAC_X722: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; @@ -582,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) else hw->pf_id = (u8)(func_rid & 0x7); + if (hw->mac.type == I40E_MAC_X722) + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + status = i40e_init_nvm(hw); return status; } @@ -659,7 +1024,7 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) - memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); + ether_addr_copy(mac_addr, addrs.pf_lan_mac); return status; } @@ -682,7 +1047,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) return status; if (flags & I40E_AQC_PORT_ADDR_VALID) - memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac)); + ether_addr_copy(mac_addr, addrs.port_mac); else status = I40E_ERR_INVALID_MAC_ADDR; @@ -740,7 +1105,7 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr) return status; if (flags & I40E_AQC_SAN_ADDR_VALID) - memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac)); + ether_addr_copy(mac_addr, addrs.pf_san_mac); else status = I40E_ERR_INVALID_MAC_ADDR; @@ -881,7 +1246,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - for (cnt = 0; cnt < grst_del + 2; cnt++) { + for (cnt = 0; cnt < grst_del + 10; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; @@ -1187,9 +1552,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) blink = false; if (blink) - gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else - gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; @@ -1241,6 +1606,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) status = I40E_ERR_UNKNOWN_PHY; + if (report_init) + hw->phy.phy_types = le32_to_cpu(abilities->phy_type); + return status; } 
@@ -1341,14 +1709,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -1859,27 +2227,54 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, /** * i40e_get_link_status - get status of the HW network link * @hw: pointer to the hw struct + * @link_up: pointer to bool (true/false = linkup/linkdown) * - * Returns true if link is up, false if link is down. + * Variable link_up true if link is up, false if link is down. + * The variable link_up is invalid if returned value of status != 0 * * Side effect: LinkStatusEvent reporting becomes enabled **/ -bool i40e_get_link_status(struct i40e_hw *hw) +i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { i40e_status status = 0; - bool link_status = false; if (hw->phy.get_link_info) { - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) - goto i40e_get_link_status_exit; + i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", + status); } - link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + + return status; +} + +/** + * i40e_updatelink_status - update status of the HW network link + * @hw: pointer to the hw struct + **/ +i40e_status i40e_update_link_info(struct i40e_hw *hw) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + i40e_status status = 0; + + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + if (status) + return status; + + if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) { + status = i40e_aq_get_phy_capabilities(hw, false, false, + &abilities, NULL); + if (status) + return status; -i40e_get_link_status_exit: - return link_status; + memcpy(hw->phy.link_info.module_type, &abilities.module_type, + sizeof(hw->phy.link_info.module_type)); + } + + return status; } /** @@ -1986,6 +2381,7 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, *vebs_free = le16_to_cpu(cmd_resp->vebs_free); if (floating) { u16 flags = le16_to_cpu(cmd_resp->veb_flags); + if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = true; else @@ -2391,7 +2787,7 @@ i40e_aq_erase_nvm_exit: #define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 #define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 +#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 #define I40E_DEV_FUNC_CAP_CEM 0xF2 #define I40E_DEV_FUNC_CAP_IWARP 0x51 #define I40E_DEV_FUNC_CAP_LED 0x61 @@ -2416,6 +2812,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; + u8 major_rev; u32 i = 0; u16 id; @@ -2433,6 +2830,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, number = le32_to_cpu(cap->number); logical_id = le32_to_cpu(cap->logical_id); phys_id = le32_to_cpu(cap->phys_id); + major_rev = cap->major_rev; switch (id) { case I40E_DEV_FUNC_CAP_SWITCH_MODE: @@ -2507,9 +2905,21 @@ static void i40e_parse_discover_capabilities(struct 
i40e_hw *hw, void *buff, case I40E_DEV_FUNC_CAP_MSIX_VF: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_MFP_MODE_1: - if (number == 1) - p->mfp_mode_1 = true; + case I40E_DEV_FUNC_CAP_FLEX10: + if (major_rev == 1) { + if (number == 1) { + p->flex10_enable = true; + p->flex10_capable = true; + } + } else { + /* Capability revision >= 2 */ + if (number & 1) + p->flex10_enable = true; + if (number & 2) + p->flex10_capable = true; + } + p->flex10_mode = logical_id; + p->flex10_status = phys_id; break; case I40E_DEV_FUNC_CAP_CEM: if (number == 1) @@ -2557,7 +2967,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, /* Software override ensuring FCoE is disabled if npar or mfp * mode because it is not supported in these modes. */ - if (p->npar_enable || p->mfp_mode_1) + if (p->npar_enable || p->flex10_enable) p->fcoe = false; /* count the enabled ports (aka the "not disabled" ports) */ @@ -3386,7 +3796,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } if (mac_addr) - memcpy(cmd->mac, mac_addr, ETH_ALEN); + ether_addr_copy(cmd->mac, mac_addr); cmd->etype = cpu_to_le16(ethtype); cmd->flags = cpu_to_le16(flags); @@ -3405,6 +3815,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control + * @hw: pointer to the hw struct + * @seid: VSI seid to add ethertype filter from + **/ +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 seid) +{ + u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; + i40e_status status; + + status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, + seid, 0, true, NULL, + NULL); + if (status) + hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); +} + +/** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 2547aa21b..2691277c0 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -292,6 +292,190 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, } /** + * i40e_parse_cee_pgcfg_tlv + * @tlv: CEE DCBX PG CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses CEE DCBX PG CFG TLV + **/ +static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + etscfg = &dcbcfg->etscfg; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + etscfg->willing = 1; + + etscfg->cbs = 0; + /* Priority Group Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + 
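/* Each Priority Group octet packs two 4-bit priority-group IDs, which is
 * why a four-octet walk fills all eight prioritytable slots. A standalone
 * sketch of the nibble unpacking (masks written literally here; the driver
 * uses its named I40E_CEE_PGID_* constants):
 *
 *	for (i = 0; i < 4; i++) {
 *		table[2 * i]     = (buf[i] & 0xF0) >> 4;	// upper nibble
 *		table[2 * i + 1] =  buf[i] & 0x0F;		// lower nibble
 *	}
 */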
etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* PG Percentage Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* Number of TCs supported (1 octet) */ + etscfg->maxtcs = buf[offset]; +} + +/** + * i40e_parse_cee_pfccfg_tlv + * @tlv: CEE DCBX PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses CEE DCBX PFC CFG TLV + **/ +static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + dcbcfg->pfc.willing = 1; + + /* ------------------------ + * | PFC Enable | PFC TCs | + * ------------------------ + * | 1 octet | 1 octet | + */ + dcbcfg->pfc.pfcenable = buf[0]; + dcbcfg->pfc.pfccap = buf[1]; +} + +/** + * i40e_parse_cee_app_tlv + * @tlv: CEE DCBX APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses CEE DCBX APP PRIO TLV + **/ +static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, typelength, offset = 0; + struct i40e_cee_app_prio *app; + u8 i, up, selector; + + typelength = ntohs(tlv->hdr.typelen); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + + dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) + return; + + for (i = 0; i < dcbcfg->numapps; i++) { + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { + if (app->prio_map & BIT(up)) + break; + } + dcbcfg->app[i].priority = up; + + /* Get Selector from lower 2 bits, and convert to IEEE */ + selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); + if (selector == I40E_CEE_APP_SEL_ETHTYPE) + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + else if (selector == I40E_CEE_APP_SEL_TCPIP) + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + else + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + + dcbcfg->app[i].protocolid = ntohs(app->protocol); + /* Move to next app */ + offset += sizeof(*app); + } +} + +/** + * i40e_parse_cee_tlv + * @tlv: CEE DCBX TLV + * @dcbcfg: Local store to update DCBX config data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 len, tlvlen, sublen, typelength; + struct i40e_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u32 ouisubtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + /* Return if not CEE DCBX */ + if (subtype != I40E_CEE_DCBX_TYPE) + return; + + typelength = ntohs(tlv->typelength); + tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + len = sizeof(tlv->typelength) + sizeof(ouisubtype) + + sizeof(struct i40e_cee_ctrl_tlv); + /* Return if no CEE DCBX Feature TLVs */ + if (tlvlen <= len) + return; + + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { + typelength = ntohs(sub_tlv->hdr.typelen); + sublen = (u16)((typelength & + I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + 
I40E_LLDP_TLV_TYPE_SHIFT); + switch (subtype) { + case I40E_CEE_SUBTYPE_PG_CFG: + i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_PFC_CFG: + i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_APP_PRI: + i40e_parse_cee_app_tlv(sub_tlv, dcbcfg); + break; + default: + return; /* Invalid Sub-type return */ + } + feat_tlv_count++; + /* Move to next sub TLV */ + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv + + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +/** * i40e_parse_org_tlv * @tlv: Organization specific TLV * @dcbcfg: Local store to update ETS REC data @@ -312,6 +496,9 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, case I40E_IEEE_8021QAZ_OUI: i40e_parse_ieee_tlv(tlv, dcbcfg); break; + case I40E_CEE_DCBX_OUI: + i40e_parse_cee_tlv(tlv, dcbcfg); + break; default: break; } @@ -502,15 +689,18 @@ static void i40e_cee_to_dcb_config( /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. + */ for (i = 0; i < 4; i++) { tc = (u8)((cee_cfg->oper_prio_tc[i] & - I40E_CEE_PGID_PRIO_1_MASK) >> - I40E_CEE_PGID_PRIO_1_SHIFT); - dcbcfg->etscfg.prioritytable[i*2] = tc; - tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); - dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + dcbcfg->etscfg.prioritytable[i * 2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) @@ -531,37 +721,85 @@ static void i40e_cee_to_dcb_config( dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; - status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> - I40E_AQC_CEE_APP_STATUS_SHIFT; + i = 0; + status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> + I40E_AQC_CEE_FCOE_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; - /* Add APPs if Error is False and Oper/Sync is True */ + /* Add FCoE APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { - /* CEE operating configuration supports FCoE/iSCSI/FIP only */ - dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; - /* FCoE APP */ - dcbcfg->app[0].priority = + dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> I40E_AQC_CEE_APP_FCOE_SHIFT; - dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; - dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; + i++; + } + status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> + I40E_AQC_CEE_ISCSI_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add iSCSI APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { /* iSCSI APP */ - dcbcfg->app[1].priority = + dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> I40E_AQC_CEE_APP_ISCSI_SHIFT; - dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; - dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; + i++; + } + status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> + I40E_AQC_CEE_FIP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; + /* Add FIP APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { /* FIP APP */ - dcbcfg->app[2].priority = + dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> I40E_AQC_CEE_APP_FIP_SHIFT; - dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; - dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; + i++; } + dcbcfg->numapps = i; +} + +/** + * i40e_get_ieee_dcb_config + * @hw: pointer to the hw struct + * + * Get IEEE mode DCB configuration from the Firmware + **/ +static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw) +{ + i40e_status ret = 0; + + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + +out: + return ret; } /** @@ -579,7 +817,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) /* If Firmware version < v4.33 IEEE only */ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) - goto ieee; + return i40e_get_ieee_dcb_config(hw); /* If Firmware version == v4.33 use old CEE struct */ if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { @@ -588,6 +826,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) if (!ret) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + le16_to_cpu(cee_v1_cfg.tlv_status); i40e_cee_to_dcb_v1_config(&cee_v1_cfg, &hw->local_dcbx_config); } @@ -597,6 +837,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) if (!ret) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + le32_to_cpu(cee_cfg.tlv_status); i40e_cee_to_dcb_config(&cee_cfg, &hw->local_dcbx_config); } @@ -604,16 +846,14 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) /* CEE mode not enabled try querying IEEE data */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) - goto ieee; - else + return i40e_get_ieee_dcb_config(hw); + + if (ret) goto out; -ieee: - /* IEEE mode */ - hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; - /* Get Local DCB Config */ + /* Get CEE DCB Desired Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, - &hw->local_dcbx_config); + &hw->desired_dcbx_config); if (ret) goto out; diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h index e137e3fac..92d01042c 100644 
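The i40e_dcb.c rework above replaces the single shared APP status with per-feature FCoE, iSCSI, and FIP status words, each decoded the same way before its APP table entry is added, so numapps now counts only the applications that are actually operational. A sketch of that repeated decode, with illustrative names:

	u8 status = (tlv_status & FEAT_STATUS_MASK) >> FEAT_STATUS_SHIFT;
	bool add_app = !(status & STATUS_ERR) &&
			(status & STATUS_SYNC) &&
			(status & STATUS_OPER);	/* add the entry only when usable */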
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -44,6 +44,15 @@ #define I40E_IEEE_SUBTYPE_PFC_CFG 11 #define I40E_IEEE_SUBTYPE_APP_PRI 12 +#define I40E_CEE_DCBX_OUI 0x001b21 +#define I40E_CEE_DCBX_TYPE 2 + +#define I40E_CEE_SUBTYPE_CTRL 1 +#define I40E_CEE_SUBTYPE_PG_CFG 2 +#define I40E_CEE_SUBTYPE_PFC_CFG 3 +#define I40E_CEE_SUBTYPE_APP_PRI 4 + +#define I40E_CEE_MAX_FEAT_TYPE 3 /* Defines for LLDP TLV header */ #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) @@ -58,9 +67,9 @@ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 #define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) #define I40E_IEEE_ETS_CBS_SHIFT 6 -#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT) #define I40E_IEEE_ETS_WILLING_SHIFT 7 -#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT) #define I40E_IEEE_ETS_PRIO_0_SHIFT 0 #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 @@ -79,9 +88,9 @@ #define I40E_IEEE_PFC_CAP_SHIFT 0 #define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) #define I40E_IEEE_PFC_MBC_SHIFT 6 -#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT) #define I40E_IEEE_PFC_WILLING_SHIFT 7 -#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) +#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT) /* Defines for IEEE APP TLV */ #define I40E_IEEE_APP_SEL_SHIFT 0 @@ -98,6 +107,36 @@ struct i40e_lldp_org_tlv { __be32 ouisubtype; u8 tlvinfo[1]; }; + +struct i40e_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 maxver; +}; + +struct i40e_cee_ctrl_tlv { + struct i40e_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct i40e_cee_feat_tlv { + struct i40e_cee_tlv_hdr hdr; + u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ +#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 +#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 +#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 + u8 subtype; + u8 tlvinfo[1]; +}; + +struct i40e_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ +#define I40E_CEE_APP_SELECTOR_MASK 0x03 + __be16 lower_oui; + u8 prio_map; +}; #pragma pack() i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index bd5079d5c..886e667f2 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) /* Set up all the App TLVs if DCBx is negotiated */ for (i = 0; i < dcbxcfg->numapps; i++) { prio = dcbxcfg->app[i].priority; - tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]); /* Add APP only if the TC is enabled for this VSI */ if (tc_map & vsi->tc_config.enabled_tc) { @@ -236,14 +236,13 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf, struct i40e_dcb_app_priority_table *app) { int v, err; + for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->netdev) { err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); - if (err) - dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", - 
__func__, pf->vsi[v]->seid, - err, app->selector, - app->protocolid, app->priority); + dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", + pf->vsi[v]->seid, err, app->selector, + app->protocolid, app->priority); } } } diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index da0faf478..d4b7af9a2 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -404,82 +404,82 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", - (long unsigned int)nstat->rx_packets, - (long unsigned int)nstat->rx_bytes, - (long unsigned int)nstat->rx_errors, - (long unsigned int)nstat->rx_dropped); + (unsigned long int)nstat->rx_packets, + (unsigned long int)nstat->rx_bytes, + (unsigned long int)nstat->rx_errors, + (unsigned long int)nstat->rx_dropped); dev_info(&pf->pdev->dev, " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", - (long unsigned int)nstat->tx_packets, - (long unsigned int)nstat->tx_bytes, - (long unsigned int)nstat->tx_errors, - (long unsigned int)nstat->tx_dropped); + (unsigned long int)nstat->tx_packets, + (unsigned long int)nstat->tx_bytes, + (unsigned long int)nstat->tx_errors, + (unsigned long int)nstat->tx_dropped); dev_info(&pf->pdev->dev, " net_stats: multicast = %lu, collisions = %lu\n", - (long unsigned int)nstat->multicast, - (long unsigned int)nstat->collisions); + (unsigned long int)nstat->multicast, + (unsigned long int)nstat->collisions); dev_info(&pf->pdev->dev, " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", - (long unsigned int)nstat->rx_length_errors, - (long unsigned int)nstat->rx_over_errors, - (long unsigned int)nstat->rx_crc_errors); + (unsigned long int)nstat->rx_length_errors, + (unsigned long int)nstat->rx_over_errors, + (unsigned long int)nstat->rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", - (long unsigned int)nstat->rx_frame_errors, - (long unsigned int)nstat->rx_fifo_errors, - (long unsigned int)nstat->rx_missed_errors); + (unsigned long int)nstat->rx_frame_errors, + (unsigned long int)nstat->rx_fifo_errors, + (unsigned long int)nstat->rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", - (long unsigned int)nstat->tx_aborted_errors, - (long unsigned int)nstat->tx_carrier_errors, - (long unsigned int)nstat->tx_fifo_errors); + (unsigned long int)nstat->tx_aborted_errors, + (unsigned long int)nstat->tx_carrier_errors, + (unsigned long int)nstat->tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", - (long unsigned int)nstat->tx_heartbeat_errors, - (long unsigned int)nstat->tx_window_errors); + (unsigned long int)nstat->tx_heartbeat_errors, + (unsigned long int)nstat->tx_window_errors); dev_info(&pf->pdev->dev, " net_stats: rx_compressed = %lu, tx_compressed = %lu\n", - (long unsigned int)nstat->rx_compressed, - (long unsigned int)nstat->tx_compressed); + (unsigned long int)nstat->rx_compressed, + (unsigned long int)nstat->tx_compressed); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = 
%lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_packets, - (long unsigned int)vsi->net_stats_offsets.rx_bytes, - (long unsigned int)vsi->net_stats_offsets.rx_errors, - (long unsigned int)vsi->net_stats_offsets.rx_dropped); + (unsigned long int)vsi->net_stats_offsets.rx_packets, + (unsigned long int)vsi->net_stats_offsets.rx_bytes, + (unsigned long int)vsi->net_stats_offsets.rx_errors, + (unsigned long int)vsi->net_stats_offsets.rx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", - (long unsigned int)vsi->net_stats_offsets.tx_packets, - (long unsigned int)vsi->net_stats_offsets.tx_bytes, - (long unsigned int)vsi->net_stats_offsets.tx_errors, - (long unsigned int)vsi->net_stats_offsets.tx_dropped); + (unsigned long int)vsi->net_stats_offsets.tx_packets, + (unsigned long int)vsi->net_stats_offsets.tx_bytes, + (unsigned long int)vsi->net_stats_offsets.tx_errors, + (unsigned long int)vsi->net_stats_offsets.tx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: multicast = %lu, collisions = %lu\n", - (long unsigned int)vsi->net_stats_offsets.multicast, - (long unsigned int)vsi->net_stats_offsets.collisions); + (unsigned long int)vsi->net_stats_offsets.multicast, + (unsigned long int)vsi->net_stats_offsets.collisions); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_length_errors, - (long unsigned int)vsi->net_stats_offsets.rx_over_errors, - (long unsigned int)vsi->net_stats_offsets.rx_crc_errors); + (unsigned long int)vsi->net_stats_offsets.rx_length_errors, + (unsigned long int)vsi->net_stats_offsets.rx_over_errors, + (unsigned long int)vsi->net_stats_offsets.rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_frame_errors, - (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors, - (long unsigned int)vsi->net_stats_offsets.rx_missed_errors); + (unsigned long int)vsi->net_stats_offsets.rx_frame_errors, + (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors, + (unsigned long int)vsi->net_stats_offsets.rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors, - (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors, - (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors); + (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors, + (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors, + (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", - (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors, - (long unsigned int)vsi->net_stats_offsets.tx_window_errors); + (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors, + (unsigned long int)vsi->net_stats_offsets.tx_window_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n", - (long unsigned int)vsi->net_stats_offsets.rx_compressed, - (long unsigned int)vsi->net_stats_offsets.tx_compressed); + (unsigned long int)vsi->net_stats_offsets.rx_compressed, + (unsigned long int)vsi->net_stats_offsets.tx_compressed); dev_info(&pf->pdev->dev, " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, 
rx_page_failed = %d\n", vsi->tx_restart, vsi->tx_busy, @@ -487,6 +487,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + if (!rx_ring) continue; @@ -527,7 +528,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, - (long unsigned int)rx_ring->dma); + (unsigned long int)rx_ring->dma); dev_info(&pf->pdev->dev, " rx_rings[%i]: vsi = %p, q_vector = %p\n", i, rx_ring->vsi, @@ -535,6 +536,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) continue; @@ -573,7 +575,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " tx_rings[%i]: size = %i, dma = 0x%08lx\n", i, tx_ring->size, - (long unsigned int)tx_ring->dma); + (unsigned long int)tx_ring->dma); dev_info(&pf->pdev->dev, " tx_rings[%i]: vsi = %p, q_vector = %p\n", i, tx_ring->vsi, @@ -743,6 +745,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) ring = &(hw->aq.asq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, @@ -755,6 +758,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) ring = &(hw->aq.arq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, @@ -949,24 +953,6 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) } } -/** - * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR - * @pf: the PF that would be altered - * @flag: flag that needs enabling or disabling - * @enable: Enable/disable FD SD/ATR - **/ -static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) -{ - if (enable) { - pf->flags |= flag; - } else { - pf->flags &= ~flag; - pf->auto_disable_flags |= flag; - } - dev_info(&pf->pdev->dev, "requesting a PF reset\n"); - i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); -} - #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum @@ -1038,7 +1024,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); } else if (strncmp(cmd_buf, "del vsi", 7) == 0) { - sscanf(&cmd_buf[7], "%i", &vsi_seid); + cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + "del vsi: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", @@ -1145,8 +1137,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); - ret = i40e_sync_vsi_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + ret = i40e_sync_vsi_filters(vsi, true); if (f && !ret) dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d added to VSI %d\n", @@ -1182,8 +1176,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto 
command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); - ret = i40e_sync_vsi_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + ret = i40e_sync_vsi_filters(vsi, true); if (!ret) dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d removed from VSI %d\n", @@ -1471,23 +1467,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "corer", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "empr", 4) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; + cnt = sscanf(&cmd_buf[4], "%i", &address); if (cnt != 1) { dev_info(&pf->pdev->dev, "read <reg>\n"); @@ -1495,9 +1492,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } /* check the range on address */ - if (address >= I40E_MAX_REGISTER) { - dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n", - address); + if (address > (pf->ioremap_len - sizeof(u32))) { + dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n", + address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); goto command_write_done; } @@ -1507,6 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "write", 5) == 0) { u32 address, value; + cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); if (cnt != 2) { dev_info(&pf->pdev->dev, "write <reg> <value>\n"); @@ -1514,9 +1512,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } /* check the range on address */ - if (address >= I40E_MAX_REGISTER) { - dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n", - address); + if (address > (pf->ioremap_len - sizeof(u32))) { + dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n", + address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); goto command_write_done; } wr32(&pf->hw, address, value); @@ -1528,6 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); if (cnt == 0) { int i; + for (i = 0; i < pf->num_alloc_vsi; i++) i40e_vsi_reset_stats(pf->vsi[i]); dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); @@ -1726,8 +1725,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); for (i = 0; i < packet_len; i++) { - sscanf(&asc_packet[j], "%2hhx ", - &raw_packet[i]); + cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]); + if (!cnt) + break; j += 3; } dev_info(&pf->pdev->dev, "FD raw packet dump\n"); @@ -1745,16 +1745,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp, raw_packet = NULL; kfree(asc_packet); asc_packet = NULL; - } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { - i40e_dbg_cmd_fd_ctrl(pf, 
I40E_FLAG_FD_ATR_ENABLED, false); - } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { - i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; + ret = i40e_aq_stop_lldp(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, @@ -1779,6 +1776,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, #endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { int ret; + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, I40E_ETH_P_LLDP, 0, @@ -1807,6 +1805,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, u16 llen, rlen; int ret; u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1833,6 +1832,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, u16 llen, rlen; int ret; u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1858,6 +1858,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, buff = NULL; } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, true, NULL); if (ret) { @@ -1868,6 +1869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) { int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); if (ret) { @@ -1969,8 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n"); dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); - dev_info(&pf->pdev->dev, " fd-atr off\n"); - dev_info(&pf->pdev->dev, " fd-atr on\n"); dev_info(&pf->pdev->dev, " fd current cnt"); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); @@ -2105,6 +2105,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, } } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { int mtu; + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", &vsi_seid, &mtu); if (cnt != 2) { @@ -2220,7 +2221,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf) create_failed: dev_info(dev, "debugfs dir/file for %s failed\n", name); debugfs_remove_recursive(pf->i40e_dbg_pf); - return; } /** diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_devids.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_devids.h new file mode 100644 index 000000000..c601ca4a6 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -0,0 +1,55 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
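[The debugfs raw-packet hunk above hardens the hex-dump loop by checking sscanf()'s return count and breaking out on the first bad token instead of silently consuming garbage. A minimal userspace sketch of the same pattern; the function and buffer names are illustrative, not the driver's:

#include <stdio.h>

/* Parse "aa bb cc ..." into bytes; stop at the first token that is
 * not a two-digit hex byte, mirroring the driver's early break. */
static int parse_hex_bytes(const char *asc, unsigned char *raw, int max)
{
	int i, j = 0;

	for (i = 0; i < max; i++) {
		if (sscanf(&asc[j], "%2hhx", &raw[i]) != 1)
			break;		/* bad token: keep what we have */
		j += 3;			/* each "xx " token is 3 chars wide */
	}
	return i;			/* number of bytes actually parsed */
}

int main(void)
{
	unsigned char raw[8];
	int n = parse_hex_bytes("de ad be ef zz", raw, 8);

	printf("parsed %d bytes, first 0x%02x\n", n, raw[0]);
	return 0;
}
]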
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c index 56438bd57..f141e78d4 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); if (!ret_code && ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == - (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { - ret_code = i40e_validate_nvm_checksum(hw, NULL); - } else { - ret_code = I40E_ERR_DIAG_TEST_FAILED; - } - - return ret_code; + BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) + return i40e_validate_nvm_checksum(hw, NULL); + else + return I40E_ERR_DIAG_TEST_FAILED; } diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4cbaaeb90..3f385ffe4 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -87,11 +87,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), + I40E_VSI_STAT("tx_linearize", tx_linearize), }; -static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, - struct ethtool_rxnfc *cmd); - /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they are separate. 
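[The new i40e_devids.h above gathers the PCI device IDs into one header and derives predicates such as i40e_is_40G_device() from them, rather than scattering magic numbers through the driver. A standalone sketch reusing three IDs straight from that header; only the main() harness is mine:

#include <stdio.h>

#define I40E_DEV_ID_QSFP_A	0x1583
#define I40E_DEV_ID_QSFP_B	0x1584
#define I40E_DEV_ID_QSFP_C	0x1585

/* classify a part by device ID, as the header does for 40G devices */
#define i40e_is_40G_device(d)	((d) == I40E_DEV_ID_QSFP_A || \
				 (d) == I40E_DEV_ID_QSFP_B || \
				 (d) == I40E_DEV_ID_QSFP_C)

int main(void)
{
	unsigned int id = 0x1584;

	printf("0x%04x is %s\n", id,
	       i40e_is_40G_device(id) ? "a 40G device" : "not 40G");
	return 0;
}
]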
This device supports Virtualization, and * as such might have several netdevs supporting VMDq and FCoE going @@ -114,7 +112,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("tx_errors", stats.eth.tx_errors), I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), - I40E_PF_STAT("crc_errors", stats.crc_errors), + I40E_PF_STAT("rx_crc_errors", stats.crc_errors), I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), @@ -147,7 +145,10 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), + I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status), /* LPI stats */ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), @@ -194,7 +195,14 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ / sizeof(u64)) +#define I40E_VEB_TC_STATS_LEN ( \ + (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \ + FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \ + FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \ + FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \ + / sizeof(u64)) #define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats) +#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN) #define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VSI_STATS_LEN((n))) @@ -219,10 +227,12 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "NPAR", + "LinkPolling", + "flow-director-atr", + "veb-stats", }; -#define I40E_PRIV_FLAGS_STR_LEN \ - (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) +#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) /** * i40e_partition_setting_complaint - generic complaint for MFP restriction @@ -243,7 +253,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) **/ static void i40e_get_settings_link_up(struct i40e_hw *hw, struct ethtool_cmd *ecmd, - struct net_device *netdev) + struct net_device *netdev, + struct i40e_pf *pf) { struct i40e_link_status *hw_link_info = &hw->phy.link_info; u32 link_speed = hw_link_info->link_speed; @@ -262,65 +273,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_40GBASE_AOC: ecmd->supported = SUPPORTED_40000baseCR4_Full; break; - case I40E_PHY_TYPE_40GBASE_KR4: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_40000baseKR4_Full; - break; case I40E_PHY_TYPE_40GBASE_SR4: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: ecmd->supported = SUPPORTED_40000baseLR4_Full; break; - case I40E_PHY_TYPE_20GBASE_KR2: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_20000baseKR2_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_20000baseKR2_Full; - break; - case I40E_PHY_TYPE_10GBASE_KX4: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseKX4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - 
ADVERTISED_10000baseKX4_Full; - break; - case I40E_PHY_TYPE_10GBASE_KR: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseKR_Full; - break; case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full; + if (hw_link_info->module_type[2] & + I40E_MODULE_TYPE_1000BASE_SX || + hw_link_info->module_type[2] & + I40E_MODULE_TYPE_1000BASE_LX) { + ecmd->supported |= SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & + I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - break; - case I40E_PHY_TYPE_1000BASE_KX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseKX_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseKX_Full; break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: - case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; + SUPPORTED_1000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_T_OPTICAL: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_100BASE_TX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ecmd->advertising |= ADVERTISED_100baseT_Full; break; @@ -340,12 +335,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; + SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; + if (pf->hw.mac.type == I40E_MAC_X722) { + ecmd->supported |= SUPPORTED_100baseT_Full; + if (hw_link_info->requested_speeds & + I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + } + break; + /* Backplane is set based on supported phy types in get_settings + * so don't set anything here but don't warn either + */ + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_1000BASE_KX: break; default: /* if we got here and link is up something bad is afoot */ @@ -384,64 +391,73 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, * Reports link settings that can be determined when link is down **/ static void i40e_get_settings_link_down(struct i40e_hw *hw, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd, + struct i40e_pf *pf) { - struct i40e_link_status *hw_link_info = &hw->phy.link_info; + enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types; /* 
link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up + * supported phy types to figure out what info to display */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - /* Figure out what has been requested */ - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->supported = 0x0; + ecmd->advertising = 0x0; + if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + if (pf->hw.mac.type == I40E_MAC_X722) { + ecmd->supported |= SUPPORTED_100baseT_Full; ecmd->advertising |= ADVERTISED_100baseT_Full; - break; - case I40E_DEV_ID_20G_KR2: - /* backplane 20G */ - ecmd->supported = SUPPORTED_20000baseKR2_Full; - ecmd->advertising = ADVERTISED_20000baseKR2_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - /* Figure out what has been requested */ - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - break; + } } + if (phy_types & I40E_CAP_PHY_TYPE_XAUI || + phy_types & I40E_CAP_PHY_TYPE_XFI || + phy_types & I40E_CAP_PHY_TYPE_SFI || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) + ecmd->supported |= SUPPORTED_10000baseT_Full; + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || + phy_types & I40E_CAP_PHY_TYPE_XLPPI || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) + ecmd->supported |= SUPPORTED_40000baseCR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + } + if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && + !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + ecmd->supported |= SUPPORTED_Autoneg | + 
SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_100baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) + ecmd->supported |= SUPPORTED_40000baseSR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) + ecmd->supported |= SUPPORTED_40000baseLR4_Full; /* With no link speed and duplex are unknown */ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); @@ -465,12 +481,43 @@ static int i40e_get_settings(struct net_device *netdev, bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; if (link_up) - i40e_get_settings_link_up(hw, ecmd, netdev); + i40e_get_settings_link_up(hw, ecmd, netdev, pf); else - i40e_get_settings_link_down(hw, ecmd); + i40e_get_settings_link_down(hw, ecmd, pf); /* Now set the settings that don't rely on link being up/down */ + /* For backplane, supported and advertised are only reliant on the + * phy types the NVM specifies are supported. + */ + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + ecmd->supported = SUPPORTED_Autoneg; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { + ecmd->supported |= SUPPORTED_40000baseKR4_Full; + ecmd->advertising |= ADVERTISED_40000baseKR4_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { + ecmd->supported |= SUPPORTED_20000baseKR2_Full; + ecmd->advertising |= ADVERTISED_20000baseKR2_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { + ecmd->supported |= SUPPORTED_10000baseKR_Full; + ecmd->advertising |= ADVERTISED_10000baseKR_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { + ecmd->supported |= SUPPORTED_10000baseKX4_Full; + ecmd->advertising |= ADVERTISED_10000baseKX4_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { + ecmd->supported |= SUPPORTED_1000baseKX_Full; + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + } + } + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -570,6 +617,14 @@ static int i40e_set_settings(struct net_device *netdev, hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + netdev_info(netdev, "Changing settings is not supported on backplane.\n"); + return -EOPNOTSUPP; + } + /* get our own copy of the bits to check against */ memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); i40e_get_settings(netdev, &safe_ecmd); @@ -606,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev, /* Check autoneg */ if (autoneg == AUTONEG_ENABLE) { - /* If autoneg is not supported, return error */ - if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { - netdev_info(netdev, "Autoneg not supported on this phy\n"); - return -EINVAL; - } /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_AN; change = true; } } else { - /* If autoneg is supported 10GBASE_T is the only phy that - * can disable it, so otherwise return error - */ - if (safe_ecmd.supported & SUPPORTED_Autoneg && - hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { - netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); - return -EINVAL; - } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != + I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; @@ -654,6 +712,13 @@ static int i40e_set_settings(struct net_device *netdev, advertise & ADVERTISED_40000baseLR4_Full) config.link_speed |= I40E_LINK_SPEED_40GB; + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. 
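[The reworked i40e_set_settings() above only rejects an unsupported autoneg request when the requested state actually differs from the current one, and (per the comment in the next hunk) falls back to the current link_speed when the advertise mask is empty. A condensed sketch of that decision flow, with types and names simplified and the PHY-specific exceptions omitted:

#include <stdbool.h>
#include <stdio.h>

struct phy_state {
	bool an_enabled;	/* autoneg currently on */
	bool an_supported;	/* PHY can change autoneg at all */
	unsigned int link_speed;
};

/* Returns 0 on success, -1 if the request is invalid. */
static int check_autoneg(const struct phy_state *phy, bool want_an,
			 unsigned int req_speed, unsigned int *out_speed)
{
	/* Only validate when the autoneg state would actually change;
	 * re-requesting the current state is always allowed. */
	if (want_an != phy->an_enabled && !phy->an_supported)
		return -1;

	/* If nothing was advertised (e.g. autoneg off), keep the
	 * current speed instead of programming zero. */
	*out_speed = req_speed ? req_speed : phy->link_speed;
	return 0;
}

int main(void)
{
	struct phy_state phy = { true, true, 10000 };
	unsigned int speed;

	if (!check_autoneg(&phy, true, 0, &speed))
		printf("ok, speed stays %u\n", speed);
	return 0;
}
]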
+ */ + if (!config.link_speed) + config.link_speed = abilities.link_speed; + if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; @@ -670,7 +735,7 @@ static int i40e_set_settings(struct net_device *netdev, /* Tell the OS link is going down, the link will go * back up when fw says it is ready asynchronously */ - netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n"); + i40e_print_link_message(vsi, false); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -678,15 +743,17 @@ static int i40e_set_settings(struct net_device *netdev, /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { - netdev_info(netdev, "Set phy config failed with error %d.\n", - status); + netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); return -EAGAIN; } - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) - netdev_info(netdev, "Updating link info failed with error %d\n", - status); + netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -706,8 +773,9 @@ static int i40e_nway_reset(struct net_device *netdev) ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); if (ret) { - netdev_info(netdev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + netdev_info(netdev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } @@ -811,7 +879,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, /* Tell the OS link is going down, the link will go back up when fw * says it is ready asynchronously */ - netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n"); + i40e_print_link_message(vsi, false); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -819,18 +887,21 @@ static int i40e_set_pauseparam(struct net_device *netdev, status = i40e_set_fc(hw, &aq_failures, link_up); if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } @@ -932,9 +1003,7 @@ static int i40e_get_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && - 
((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || - (hw->debug_mask & I40E_DEBUG_NVM))) + if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1008,7 +1077,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; /* register returns value in power of 2, 64Kbyte chunks. */ - val = (64 * 1024) * (1 << val); + val = (64 * 1024) * BIT(val); return val; } @@ -1038,10 +1107,7 @@ static int i40e_set_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && - ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && - hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || - (hw->debug_mask & I40E_DEBUG_NVM))) + if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1061,11 +1127,10 @@ static void i40e_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, i40e_driver_version_str, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw), + strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1150,6 +1215,11 @@ static int i40e_set_ringparam(struct net_device *netdev, /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated in the + * setup call + */ + tx_rings[i].desc = NULL; + tx_rings[i].rx_bi = NULL; err = i40e_setup_tx_descriptors(&tx_rings[i]); if (err) { while (i) { @@ -1180,6 +1250,11 @@ static int i40e_set_ringparam(struct net_device *netdev, /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_count; + /* the desc and bi pointers will be reallocated in the + * setup call + */ + rx_rings[i].desc = NULL; + rx_rings[i].rx_bi = NULL; err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) { while (i) { @@ -1247,8 +1322,9 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { int len = I40E_PF_STATS_LEN(netdev); - if (pf->lan_veb != I40E_NO_VEB) - len += I40E_VEB_STATS_LEN; + if ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) + len += I40E_VEB_STATS_TOTAL; return len; } else { return I40E_VSI_STATS_LEN(netdev); @@ -1320,14 +1396,22 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - if (pf->lan_veb != I40E_NO_VEB) { + if ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { struct i40e_veb *veb = pf->veb[pf->lan_veb]; + for (j = 0; j < I40E_VEB_STATS_LEN; j++) { p = (char *)veb; p += i40e_gstrings_veb_stats[j].stat_offset; data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { + data[i++] = veb->tc_stats.tc_tx_packets[j]; + data[i++] = veb->tc_stats.tc_tx_bytes[j]; + data[i++] = veb->tc_stats.tc_rx_packets[j]; + data[i++] = veb->tc_stats.tc_rx_bytes[j]; + } } for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; @@ -1393,12 +1477,27 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - if (pf->lan_veb != I40E_NO_VEB) { + if ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { for (i = 0; i < I40E_VEB_STATS_LEN; i++) { snprintf(p, ETH_GSTRING_LEN, "veb.%s", i40e_gstrings_veb_stats[i].stat_string); p += ETH_GSTRING_LEN; } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_bytes", i); + p += ETH_GSTRING_LEN; + } } for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { snprintf(p, ETH_GSTRING_LEN, "port.%s", @@ -1461,20 +1560,11 @@ static int i40e_get_ts_info(struct net_device *dev, else info->phc_index = -1; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } @@ -1483,9 +1573,18 @@ static int i40e_link_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + i40e_status status; + bool link_up = false; netif_info(pf, hw, netdev, "link test\n"); - if (i40e_get_link_status(&pf->hw)) + status = i40e_get_link_status(&pf->hw, &link_up); + if (status) { + netif_err(pf, drv, netdev, "link query timed out, please retry test\n"); + *data = 1; + return *data; + } + + if (link_up) *data = 0; else *data = 1; @@ -1548,6 +1647,32 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data) return *data; } +static inline bool i40e_active_vfs(struct i40e_pf *pf) +{ + struct i40e_vf *vfs = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states)) + return true; + return false; +} + +static inline bool i40e_active_vmdqs(struct i40e_pf *pf) +{ + struct i40e_vsi **vsi = pf->vsi; + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!vsi[i]) + continue; + if (vsi[i]->type == I40E_VSI_VMDQ2) + return true; + } + + return false; +} + static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { @@ -1560,16 +1685,32 @@ static void i40e_diag_test(struct net_device *netdev, netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, 
&pf->state); + + if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { + dev_warn(&pf->pdev->dev, + "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LOOPBACK] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, &pf->state); + goto skip_ol_tests; + } + /* If the device is online then take it offline */ if (if_running) /* indicate we're in test mode */ dev_close(netdev); else - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + /* This reset does not affect link - if it is + * changed to a type of reset that does affect + * link then the following link test would have + * to be moved to before the reset + */ + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); - /* Link test performed before hardware reset - * so autoneg doesn't interfere with test result - */ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1587,7 +1728,7 @@ static void i40e_diag_test(struct net_device *netdev, eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__I40E_TESTING, &pf->state); - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); if (if_running) dev_open(netdev); @@ -1605,6 +1746,8 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_LOOPBACK] = 0; } +skip_ol_tests: + netif_info(pf, drv, netdev, "testing finished\n"); } @@ -1618,7 +1761,7 @@ static void i40e_get_wol(struct net_device *netdev, /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) { + if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { wol->supported = 0; wol->wolopts = 0; } else { @@ -1651,7 +1794,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if (((1 << hw->port) & wol_nvm_bits)) + if (BIT(hw->port) & wol_nvm_bits) return -EOPNOTSUPP; /* only magic packet is supported */ @@ -1717,6 +1860,14 @@ static int i40e_get_coalesce(struct net_device *netdev, ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + /* we use the _usecs_high to store/set the interrupt rate limit + * that the hardware supports, that almost but not quite + * fits the original intent of the ethtool variable, + * the rx_coalesce_usecs_high limits total interrupts + * per second from both tx/rx sources. 
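[As the comment above explains, the driver repurposes rx_coalesce_usecs_high to carry a single interrupt rate limit shared by Tx and Rx; the set path in the following hunk rejects values at or above INTRL_REG_TO_USEC(I40E_MAX_INTRL), reported to the user as a 0-235 range. A small sketch of that validation; the limit constant matches the printed range, everything else is illustrative:

#include <stdio.h>

#define MAX_INTR_RATE_LIMIT_USECS 235	/* per the driver's message */

struct vsi_cfg {
	unsigned int int_rate_limit;	/* usecs between interrupts */
};

static int set_rate_limit(struct vsi_cfg *vsi, unsigned int usecs_high)
{
	if (usecs_high > MAX_INTR_RATE_LIMIT_USECS) {
		fprintf(stderr, "rx-usecs-high range is 0-%d\n",
			MAX_INTR_RATE_LIMIT_USECS);
		return -1;
	}
	vsi->int_rate_limit = usecs_high;	/* limits tx and rx together */
	return 0;
}

int main(void)
{
	struct vsi_cfg vsi = { 0 };

	if (!set_rate_limit(&vsi, 100))
		printf("rate limit now %u usecs\n", vsi.int_rate_limit);
	return 0;
}
]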
+ */ + ec->rx_coalesce_usecs_high = vsi->int_rate_limit; + ec->tx_coalesce_usecs_high = vsi->int_rate_limit; return 0; } @@ -1735,6 +1886,17 @@ static int i40e_set_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; + /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ + if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { + netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); + return -EINVAL; + } + vector = vsi->base_vector; if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { @@ -1748,6 +1910,8 @@ static int i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } + vsi->int_rate_limit = ec->rx_coalesce_usecs_high; + if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { vsi->tx_itr_setting = ec->tx_coalesce_usecs; @@ -1772,11 +1936,14 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + q_vector = vsi->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); + wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); i40e_flush(hw); } @@ -1997,10 +2164,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: return -EINVAL; @@ -2009,10 +2176,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: return -EINVAL; @@ -2021,12 +2188,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; default: return -EINVAL; @@ -2035,12 +2202,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << 
I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; @@ -2053,7 +2220,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -2062,15 +2229,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case IPV6_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; default: return -EINVAL; @@ -2265,7 +2432,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->pctype = 0; input->dest_vsi = vsi->id; input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; - input->cnt_index = pf->fd_sb_cnt_idx; + input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); input->flow_type = fsp->flow_type; input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; @@ -2481,7 +2648,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @indir: indirection table * @key: hash key * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, @@ -2539,10 +2706,51 @@ static u32 i40e_get_priv_flags(struct net_device *dev) ret_flags |= pf->hw.func_caps.npar_enable ? I40E_PRIV_FLAGS_NPAR_FLAG : 0; + ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? + I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; + ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ? + I40E_PRIV_FLAGS_FD_ATR : 0; + ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? 
+ I40E_PRIV_FLAGS_VEB_STATS : 0; return ret_flags; } +/** + * i40e_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40e_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) + pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; + else + pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED; + + /* allow the user to control the state of the Flow + * Director ATR (Application Targeted Routing) feature + * of the driver + */ + if (flags & I40E_PRIV_FLAGS_FD_ATR) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + } else { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + } + + if (flags & I40E_PRIV_FLAGS_VEB_STATS) + pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + + return 0; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .set_settings = i40e_set_settings, @@ -2579,6 +2787,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, .get_priv_flags = i40e_get_priv_flags, + .set_priv_flags = i40e_set_priv_flags, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 1803afeef..fe5d9bf3e 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof) * * The FC EOF is converted to the value understood by HW for descriptor * programming. Never call this w/o calling i40e_fcoe_eof_is_supported() - * first. + * first and that already checks for all supported valid eof values. **/ static inline u32 i40e_fcoe_ctxt_eof(u8 eof) { @@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof) case FC_EOF_A: return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A; default: - /* FIXME: still returns 0 */ - pr_err("Unrecognized EOF %x\n", eof); - return 0; + /* Supported valid eof shall be already checked by + * calling i40e_fcoe_eof_is_supported() first, + * therefore this default case shall never hit. 
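[The new i40e_set_priv_flags() above translates ethtool's u32 of private flags into the driver's internal feature flags, with the ATR case additionally latching an auto-disable bit when the feature is switched off. A userspace sketch of that two-way mapping; the bit assignments here are invented for illustration, not the driver's actual values:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions only */
#define PRIV_FLAG_LINKPOLL	(1u << 1)
#define PRIV_FLAG_FD_ATR	(1u << 2)

#define DRV_FLAG_LINK_POLLING	(1ull << 10)
#define DRV_FLAG_FD_ATR		(1ull << 11)

static uint32_t get_priv_flags(uint64_t drv_flags)
{
	uint32_t ret = 0;

	ret |= (drv_flags & DRV_FLAG_LINK_POLLING) ? PRIV_FLAG_LINKPOLL : 0;
	ret |= (drv_flags & DRV_FLAG_FD_ATR) ? PRIV_FLAG_FD_ATR : 0;
	return ret;
}

static void set_priv_flags(uint64_t *drv_flags, uint64_t *auto_disable,
			   uint32_t flags)
{
	if (flags & PRIV_FLAG_LINKPOLL)
		*drv_flags |= DRV_FLAG_LINK_POLLING;
	else
		*drv_flags &= ~DRV_FLAG_LINK_POLLING;

	if (flags & PRIV_FLAG_FD_ATR) {
		*drv_flags |= DRV_FLAG_FD_ATR;
	} else {
		/* remember the user turned it off, as the driver does */
		*drv_flags &= ~DRV_FLAG_FD_ATR;
		*auto_disable |= DRV_FLAG_FD_ATR;
	}
}

int main(void)
{
	uint64_t drv = 0, auto_dis = 0;

	set_priv_flags(&drv, &auto_dis, PRIV_FLAG_LINKPOLL);
	printf("flags=0x%llx get=0x%x\n",
	       (unsigned long long)drv, get_priv_flags(drv));
	return 0;
}
]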
+ */ + WARN_ON(1); + return -EINVAL; } } @@ -269,10 +272,8 @@ out: /** * i40e_fcoe_sw_init - sets up the HW for FCoE * @pf: pointer to PF - * - * Returns 0 if FCoE is supported otherwise the error code **/ -int i40e_init_pf_fcoe(struct i40e_pf *pf) +void i40e_init_pf_fcoe(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; @@ -283,20 +284,20 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) pf->fcoe_hmc_filt_num = 0; if (!pf->hw.func_caps.fcoe) { - dev_info(&pf->pdev->dev, "FCoE capability is disabled\n"); - return 0; + dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n"); + return; } if (!pf->hw.func_caps.dcb) { dev_warn(&pf->pdev->dev, "Hardware is not DCB capable not enabling FCoE.\n"); - return 0; + return; } /* enable FCoE hash filter */ val = rd32(hw, I40E_PFQF_HENA(1)); - val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32); - val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32); + val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); + val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; wr32(hw, I40E_PFQF_HENA(1), val); @@ -305,10 +306,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) pf->num_fcoe_qps = I40E_DEFAULT_FCOE; /* Reserve 4K DDP contexts and 20K filter size for FCoE */ - pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * - I40E_DMA_CNTX_BASE_SIZE; + pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) * + I40E_DMA_CNTX_BASE_SIZE; pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + - (1 << I40E_HASH_FILTER_SIZE_16K) * + BIT(I40E_HASH_FILTER_SIZE_16K) * I40E_HASH_FILTER_BASE_SIZE; /* FCoE object: max 16K filter buckets and 4K DMA contexts */ @@ -323,7 +324,6 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) wr32(hw, I40E_GLFCOE_RCTL, val); dev_info(&pf->pdev->dev, "FCoE is supported.\n"); - return 0; } /** @@ -345,7 +345,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app.protocolid == ETH_P_FCOE) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= (1 << tc); + enabled_tc |= BIT(tc); break; } } @@ -1516,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) * same PCI function. 
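[A recurring change across these hunks is replacing open-coded shifts such as (1 << n) and ((u64)1 << n) with the kernel's BIT()/BIT_ULL() macros, for example the FCoE hash-filter enable bits in i40e_init_pf_fcoe() just above. A standalone sketch with the macros spelled out; the pctype positions follow the driver headers, and the register value is a plain variable here rather than MMIO:

#include <stdint.h>
#include <stdio.h>

/* as defined in the kernel's <linux/bits.h> */
#define BIT(n)		(1UL << (n))
#define BIT_ULL(n)	(1ULL << (n))

/* FCoE pctype bit positions (see i40e_type.h); HENA(1) holds bits 32..63,
 * hence the "- 32" when programming the upper register */
#define I40E_FILTER_PCTYPE_FCOE_OX	48
#define I40E_FILTER_PCTYPE_FCOE_RX	49

int main(void)
{
	uint32_t val = 0;

	/* enable the FCoE hash filters, as i40e_init_pf_fcoe() does */
	val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
	val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);

	printf("HENA(1) enable mask: 0x%08x\n", (unsigned)val);
	return 0;
}
]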
*/ netdev->dev_port = 1; + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* use san mac */ ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h index 0d49e2d15..a93174dde 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -59,9 +59,9 @@ (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ - (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) + BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ - (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) + BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 9b987ccc9..5ebe12d56 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -116,6 +116,7 @@ exit: * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @pd_index: which page descriptor index to manipulate + * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. * * This function: * 1. Initializes the pd entry @@ -129,12 +130,14 @@ exit: **/ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index) + u32 pd_index, + struct i40e_dma_mem *rsrc_pg) { i40e_status ret_code = 0; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; + struct i40e_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; u64 *pd_addr; u64 page_desc; @@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (!pd_entry->valid) { - /* allocate a 4K backing page */ - ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp, - I40E_HMC_PAGED_BP_SIZE, - I40E_HMC_PD_BP_BUF_ALIGNMENT); - if (ret_code) - goto exit; + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + pd_entry->rsrc_pg = false; + } - pd_entry->bp.addr = mem; + pd_entry->bp.addr = *page; pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ - page_desc = mem.pa | 0x1; + page_desc = page->pa | 0x1; pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; @@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); /* free memory here */ - ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (!pd_entry->rsrc_pg) + ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr); if (ret_code) goto exit; if (!pd_table->ref_cnt) @@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, u32 idx, bool is_pf) { struct 
i40e_hmc_sd_entry *sd_entry; - i40e_status ret_code = 0; + + if (!is_pf) + return I40E_NOT_SUPPORTED; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; - if (is_pf) { - I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); - } else { - ret_code = I40E_NOT_SUPPORTED; - goto exit; - } - ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); - if (ret_code) - goto exit; -exit: - return ret_code; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); + + return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr); } /** @@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf) { - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; + if (!is_pf) + return I40E_NOT_SUPPORTED; + sd_entry = &hmc_info->sd_table.sd_entry[idx]; - if (is_pf) { - I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); - } else { - ret_code = I40E_NOT_SUPPORTED; - goto exit; - } - /* free memory here */ - ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); - if (ret_code) - goto exit; -exit: - return ret_code; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); + + return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr); } diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 732a02660..d90669211 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -126,8 +127,8 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -146,7 +147,7 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 0079ad7bc..79ae7beea 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, obj->cnt = txq_num; obj->base = 0; size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (txq_num > obj->max_cnt) { @@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (rxq_num > obj->max_cnt) { @@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_cntx_num > obj->max_cnt) { @@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_filt_num > obj->max_cnt) { @@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, /* update the pd table entry */ ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, - i); + i, NULL); if (ret_code) { pd_error = true; break; @@ -431,9 +431,8 @@ exit_sd_error: pd_idx1 = max(pd_idx, ((j - 1) * I40E_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); - for (i = pd_idx1; i < pd_lmt1; i++) { + for (i = pd_idx1; i < pd_lmt1; i++) i40e_remove_pd_bp(hw, info->hmc_info, i); - } i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); break; case I40E_SD_TYPE_DIRECT: @@ -763,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = ((u8)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; src_byte = *from; src_byte &= mask; @@ -804,7 +803,7 @@ static void i40e_write_word(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = ((u16)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines @@ -854,7 +853,7 @@ static 
void i40e_write_dword(u8 *hmc_bits, * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) - mask = ((u32)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; else mask = ~(u32)0; @@ -906,7 +905,7 @@ static void i40e_write_qword(u8 *hmc_bits, * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) - mask = ((u64)1 << ce_info->width) - 1; + mask = BIT_ULL(ce_info->width) - 1; else mask = ~(u64)0; diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c index 5b5bea159..4a9873ec2 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 46 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -75,7 +75,13 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ {0, } }; @@ -210,10 +216,10 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, ret = i; pile->search_hint = i + j; break; - } else { - /* not enough, so skip over it and continue looking */ - i += j; } + + /* not enough, so skip over it and continue looking */ + i += j; } return ret; @@ -296,25 +302,69 @@ static void i40e_tx_timeout(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + struct i40e_ring *tx_ring = NULL; + unsigned int i, hung_queue = 0; + u32 head, val; pf->tx_timeout_count++; + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start ? 
: netdev->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->queue_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) - pf->tx_timeout_recovery_level = 1; + pf->tx_timeout_recovery_level = 1; /* reset after some time */ + else if (time_before(jiffies, + (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + if (tx_ring) { + head = i40e_get_head(tx_ring); + /* Read interrupt register */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + val = rd32(&pf->hw, + I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + else + val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + + netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->seid, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d\n", - pf->tx_timeout_recovery_level); + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); switch (pf->tx_timeout_recovery_level) { - case 0: - /* disable and re-enable queues for the VSI */ - if (in_interrupt()) { - set_bit(__I40E_REINIT_REQUESTED, &pf->state); - set_bit(__I40E_REINIT_REQUESTED, &vsi->state); - } else { - i40e_vsi_reinit_locked(vsi); - } - break; case 1: set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); break; @@ -326,10 +376,9 @@ static void i40e_tx_timeout(struct net_device *netdev) break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); - set_bit(__I40E_DOWN_REQUESTED, &pf->state); - set_bit(__I40E_DOWN_REQUESTED, &vsi->state); break; } + i40e_service_event_schedule(pf); pf->tx_timeout_recovery_level++; } @@ -428,6 +477,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( stats->tx_errors = vsi_stats->tx_errors; stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; + stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; @@ -453,11 +503,11 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); if (vsi->rx_rings && vsi->rx_rings[0]) { for (i = 0; i < vsi->num_queue_pairs; i++) { - memset(&vsi->rx_rings[i]->stats, 0 , + memset(&vsi->rx_rings[i]->stats, 0, sizeof(vsi->rx_rings[i]->stats)); - memset(&vsi->rx_rings[i]->rx_stats, 0 , + memset(&vsi->rx_rings[i]->rx_stats, 0, sizeof(vsi->rx_rings[i]->rx_stats)); - memset(&vsi->tx_rings[i]->stats, 0 , + memset(&vsi->tx_rings[i]->stats, 0, sizeof(vsi->tx_rings[i]->stats)); memset(&vsi->tx_rings[i]->tx_stats, 0, sizeof(vsi->tx_rings[i]->tx_stats)); @@ -520,7 +570,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, if (likely(new_data >= *offset)) *stat = new_data - *offset; else - *stat = (new_data + ((u64)1 << 48)) - *offset; + *stat = (new_data + BIT_ULL(48)) - *offset; *stat &= 
0xFFFFFFFFFFFFULL; } @@ -543,7 +593,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, if (likely(new_data >= *offset)) *stat = (u32)(new_data - *offset); else - *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); + *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } /** @@ -621,11 +671,15 @@ static void i40e_update_veb_stats(struct i40e_veb *veb) struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ - int idx = 0; + struct i40e_veb_tc_stats *veb_oes; + struct i40e_veb_tc_stats *veb_es; + int i, idx = 0; idx = veb->stats_idx; es = &veb->stats; oes = &veb->stats_offsets; + veb_es = &veb->tc_stats; + veb_oes = &veb->tc_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), @@ -661,6 +715,28 @@ static void i40e_update_veb_stats(struct i40e_veb *veb) i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), veb->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx), + I40E_GLVEBTC_RPCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_rx_packets[i], + &veb_es->tc_rx_packets[i]); + i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx), + I40E_GLVEBTC_RBCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_rx_bytes[i], + &veb_es->tc_rx_bytes[i]); + i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx), + I40E_GLVEBTC_TPCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_tx_packets[i], + &veb_es->tc_tx_packets[i]); + i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx), + I40E_GLVEBTC_TBCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_tx_bytes[i], + &veb_es->tc_tx_bytes[i]); + } veb->stat_offsets_loaded = true; } @@ -725,7 +801,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf) struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw; u64 xoff = 0; - u16 i, v; if ((hw->fc.current_mode != I40E_FC_FULL) && (hw->fc.current_mode != I40E_FC_RX_PAUSE)) @@ -740,18 +815,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf) if (!(nsd->link_xoff_rx - xoff)) return; - /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - - if (!vsi || !vsi->tx_rings[0]) - continue; - - for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *ring = vsi->tx_rings[i]; - clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); - } - } } /** @@ -767,20 +830,20 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; struct i40e_dcbx_config *dcb_cfg; struct i40e_hw *hw = &pf->hw; - u16 i, v; + u16 i; u8 tc; dcb_cfg = &hw->local_dcbx_config; - /* See if DCB enabled with PFC TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || - !(dcb_cfg->pfc.pfcenable)) { + /* Collect Link XOFF stats when PFC is disabled */ + if (!dcb_cfg->pfc.pfcenable) { i40e_update_link_xoff_rx(pf); return; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { u64 prio_xoff = nsd->priority_xoff_rx[i]; + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_rx[i], @@ -793,23 +856,6 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) tc = dcb_cfg->etscfg.prioritytable[i]; xoff[tc] = true; } - - /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - - if (!vsi || !vsi->tx_rings[0]) - continue; - - for (i = 0; i < 
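The i40e_stat_update48()/i40e_stat_update32() pair above reads free-running hardware counters that wrap at 2^48 and 2^32 respectively; when the new sample is smaller than the saved baseline, one wrap's worth is added back before subtracting. A standalone sketch of the 48-bit arithmetic, valid under the assumption that the counter wraps at most once between reads:

#include <stdint.h>

#define CTR_BITS 48
#define CTR_MASK ((1ULL << CTR_BITS) - 1)

/* Delta of a 48-bit rolling counter relative to a saved baseline. */
static uint64_t stat_delta48(uint64_t new_data, uint64_t baseline)
{
	uint64_t stat;

	if (new_data >= baseline)
		stat = new_data - baseline;
	else  /* counter rolled over past 2^48 since the baseline */
		stat = (new_data + (1ULL << CTR_BITS)) - baseline;

	return stat & CTR_MASK;  /* keep the result in 48 bits */
}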
vsi->num_queue_pairs; i++) { - struct i40e_ring *ring = vsi->tx_rings[i]; - - tc = ring->dcb_tc; - if (xoff[tc]) - clear_bit(__I40E_HANG_CHECK_ARMED, - &ring->state); - } - } } /** @@ -834,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u32 rx_page, rx_buf; u64 bytes, packets; unsigned int start; + u64 tx_linearize; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -852,7 +899,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) */ rx_b = rx_p = 0; tx_b = tx_p = 0; - tx_restart = tx_busy = 0; + tx_restart = tx_busy = tx_linearize = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -869,6 +916,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; + tx_linearize += p->tx_stats.tx_linearize; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -885,6 +933,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rcu_read_unlock(); vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; + vsi->tx_linearize = tx_linearize; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1097,12 +1146,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ - i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_atr_match, &nsd->fd_atr_match); - i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_sb_match, &nsd->fd_sb_match); + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)), + pf->stat_offsets_loaded, + &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = @@ -1118,6 +1173,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + nsd->fd_sb_status = true; + else + nsd->fd_sb_status = false; + + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + nsd->fd_atr_status = true; + else + nsd->fd_atr_status = false; + pf->stat_offsets_loaded = true; } @@ -1210,7 +1277,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) * so we have to go through all the list in order to make sure */ list_for_each_entry(f, &vsi->mac_filter_list, list) { - if (f->vlan >= 0) + if (f->vlan >= 0 || vsi->info.pvid) return true; } @@ -1235,6 +1302,8 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, struct i40e_mac_filter *f; list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (vsi->info.pvid) + f->vlan = le16_to_cpu(vsi->info.pvid); if (!i40e_find_filter(vsi, macaddr, f->vlan, is_vf, is_netdev)) { if (!i40e_add_filter(vsi, macaddr, f->vlan, @@ -1259,7 +1328,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; - i40e_status aq_ret; + i40e_status ret; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) @@ -1270,8 +1339,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - aq_ret = 
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - if (aq_ret) + ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + if (ret) return -ENOENT; return 0; @@ -1286,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1344,6 +1416,9 @@ add_filter_out: * @vlan: the vlan * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. **/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1371,6 +1446,7 @@ void i40e_del_filter(struct i40e_vsi *vsi, } else { /* make sure we don't remove a filter in use by VF or netdev */ int min_f = 0; + min_f += (f->is_vf ? 1 : 0); min_f += (f->is_netdev ? 1 : 0); @@ -1429,6 +1505,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; + ret = i40e_aq_mac_address_write(&vsi->back->hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); @@ -1448,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { @@ -1460,13 +1539,15 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false); if (f) f->is_laa = true; + spin_unlock_bh(&vsi->mac_filter_list_lock); } - i40e_sync_vsi_filters(vsi); + i40e_sync_vsi_filters(vsi, false); ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; @@ -1509,7 +1590,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) /* TC is enabled */ + if (enabled_tc & BIT_ULL(i)) /* TC is enabled */ numtc++; } if (!numtc) { @@ -1528,14 +1609,18 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, * vectors available and so we need to lower the used * q count. 
*/ - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + else + qcount = vsi->alloc_queue_pairs; num_tc_qps = qcount / numtc; - num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); + num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ - if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ + if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { + /* TC is enabled */ int pow, num_qps; switch (vsi->type) { @@ -1561,7 +1646,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; - while (num_qps && ((1 << pow) < qcount)) { + while (num_qps && (BIT_ULL(pow) < qcount)) { pow++; num_qps >>= 1; } @@ -1591,7 +1676,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; - else + else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; } @@ -1632,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev) struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { if (!i40e_find_mac(vsi, uca->addr, false, true)) { @@ -1657,37 +1744,29 @@ static void i40e_set_rx_mode(struct net_device *netdev) /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - bool found = false; if (!f->is_netdev) continue; - if (is_multicast_ether_addr(f->macaddr)) { - netdev_for_each_mc_addr(mca, netdev) { - if (ether_addr_equal(mca->addr, f->macaddr)) { - found = true; - break; - } - } - } else { - netdev_for_each_uc_addr(uca, netdev) { - if (ether_addr_equal(uca->addr, f->macaddr)) { - found = true; - break; - } - } + netdev_for_each_mc_addr(mca, netdev) + if (ether_addr_equal(mca->addr, f->macaddr)) + goto bottom_of_search_loop; - for_each_dev_addr(netdev, ha) { - if (ether_addr_equal(ha->addr, f->macaddr)) { - found = true; - break; - } - } - } - if (!found) - i40e_del_filter( - vsi, f->macaddr, I40E_VLAN_ANY, false, true); + netdev_for_each_uc_addr(uca, netdev) + if (ether_addr_equal(uca->addr, f->macaddr)) + goto bottom_of_search_loop; + + for_each_dev_addr(netdev, ha) + if (ether_addr_equal(ha->addr, f->macaddr)) + goto bottom_of_search_loop; + + /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true); + +bottom_of_search_loop: + continue; } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { @@ -1697,24 +1776,101 @@ static void i40e_set_rx_mode(struct net_device *netdev) } /** + * i40e_mac_filter_entry_clone - Clones a MAC filter entry + * @src: source MAC filter entry to be cloned + * + * Returns the pointer to newly cloned MAC filter entry or NULL + * in case of error + **/ +static struct i40e_mac_filter *i40e_mac_filter_entry_clone( + struct i40e_mac_filter *src) +{ + struct i40e_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + *f = *src; + + INIT_LIST_HEAD(&f->list); + + return f; +} +
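A detail worth noting in i40e_mac_filter_entry_clone() just above: the structure assignment *f = *src also copies the embedded list_head, which would leave the clone aliasing the source list's neighbours, so INIT_LIST_HEAD() must re-initialize it before the clone is linked anywhere. Reduced to its essentials (a generic struct item in place of the MAC filter):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int payload;
	struct list_head list;
};

/* GFP_ATOMIC because the driver clones while holding a spinlock. */
static struct item *item_clone(const struct item *src)
{
	struct item *f = kzalloc(sizeof(*f), GFP_ATOMIC);

	if (!f)
		return NULL;
	*f = *src;                /* copies list.next/list.prev too ...      */
	INIT_LIST_HEAD(&f->list); /* ... so unhook the copy from src's list  */
	return f;
}

+/**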
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * @from: Pointer to list which contains MAC filter entries - changes to + * those entries need to be undone. + * + * MAC filter entries from list were slated to be removed from device. + **/ +static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, + struct list_head *from) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + f->changed = true; + /* Move the element back into MAC filter list */ + list_move_tail(&f->list, &vsi->mac_filter_list); + } +} + +/** + * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * + * MAC filter entries from list were slated to be added to the device. + **/ +static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed && f->counter) + f->changed = true; + } +} + +/** + * i40e_cleanup_add_list - Deletes the elements from add list and releases + * memory + * @add_list: Pointer to list which contains MAC filter entries + **/ +static void i40e_cleanup_add_list(struct list_head *add_list) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, add_list, list) { + list_del(&f->list); + kfree(f); + } +} + +/** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI + * @grab_rtnl: whether RTNL needs to be grabbed * * Push any outstanding VSI filter changes through the AdminQ. * * Returns 0 or error value **/ -int i40e_sync_vsi_filters(struct i40e_vsi *vsi) +int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) { - struct i40e_mac_filter *f, *ftmp; + struct list_head tmp_del_list, tmp_add_list; + struct i40e_mac_filter *f, *ftmp, *fclone; bool promisc_forced_on = false; bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; - i40e_status aq_ret = 0; + bool err_cond = false; + i40e_status ret = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; + int aq_err = 0; u16 cmd_flags; /* empty array typed pointers, kcalloc later */ @@ -1730,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi->current_netdev_flags = vsi->netdev->flags; } + INIT_LIST_HEAD(&tmp_del_list); + INIT_LIST_HEAD(&tmp_add_list); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; - filter_list_len = pf->hw.aq.asq_buf_size / - sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); - if (!del_list) - return -ENOMEM; - + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed) continue; @@ -1748,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (f->counter != 0) continue; f->changed = false; + + /* Move the element into temporary del_list */ + list_move_tail(&f->list, &tmp_del_list); + } + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter == 0) + continue; + f->changed = false; + + /* Clone MAC filter entry and add into temporary list */ + fclone = i40e_mac_filter_entry_clone(f); + if (!fclone) { + err_cond = true; + break; + } + list_add_tail(&fclone->list, &tmp_add_list); + } + + /* if failed to clone MAC filter entry - undo */ + if (err_cond) { +
i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + } + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (err_cond) + i40e_cleanup_add_list(&tmp_add_list); + } + + /* Now process 'del_list' outside the lock */ + if (!list_empty(&tmp_del_list)) { + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_remove_macvlan_element_data), + GFP_KERNEL); + if (!del_list) { + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo VSI's MAC filter entry element updates */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; + } + + list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { cmd_flags = 0; /* add to delete list */ @@ -1760,41 +1964,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) del_list[num_del].flags = cmd_flags; num_del++; - /* unlink from filter list */ - list_del(&f->list); - kfree(f); - /* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, del_list, num_del, - NULL); + ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, del_list, num_del, + NULL); + aq_err = pf->hw.aq.asq_last_status; num_del = 0; memset(del_list, 0, sizeof(*del_list)); - if (aq_ret && - pf->hw.aq.asq_last_status != - I40E_AQ_RC_ENOENT) - dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", - aq_ret, - pf->hw.aq.asq_last_status); + if (ret && aq_err != I40E_AQ_RC_ENOENT) + dev_err(&pf->pdev->dev, + "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } + /* Release memory for MAC filter entries which were + * synced up with HW. 
+ */ + list_del(&f->list); + kfree(f); } + if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); + aq_err = pf->hw.aq.asq_last_status; num_del = 0; - if (aq_ret && - pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) + if (ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "ignoring delete macvlan error, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } kfree(del_list); del_list = NULL; + } + + if (!list_empty(&tmp_add_list)) { /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / @@ -1802,16 +2011,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) add_list = kcalloc(filter_list_len, sizeof(struct i40e_aqc_add_macvlan_element_data), GFP_KERNEL); - if (!add_list) + if (!add_list) { + /* Purge element from temporary lists */ + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo add filter entries from VSI MAC filter list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; + } - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; + list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - if (f->counter == 0) - continue; - f->changed = false; add_happened = true; cmd_flags = 0; @@ -1828,29 +2040,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, - NULL); + ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); + aq_err = pf->hw.aq.asq_last_status; num_add = 0; - if (aq_ret) + if (ret) break; memset(add_list, 0, sizeof(*add_list)); } + /* Entries from tmp_add_list were cloned from MAC + * filter list, hence clean those cloned entries + */ + list_del(&f->list); + kfree(f); } + if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, NULL); + ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); + aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; - if (add_happened && aq_ret && - pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { + if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { dev_info(&pf->pdev->dev, - "add filter failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "add filter failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { @@ -1865,35 +2085,67 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; + cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_multipromisc, - NULL); - if (aq_ret) + ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (ret) dev_info(&pf->pdev->dev, - "set multi promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multi promisc failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) 
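The restructured i40e_sync_vsi_filters() above is an instance of a common locking pattern: under the spinlock it only moves entries onto private tmp_del_list/tmp_add_list heads (list_move_tail() is O(1) and cannot fail), and the slow, potentially sleeping AdminQ calls then run with the lock dropped. A skeleton of the delete half, with a hypothetical hw_remove() standing in for the firmware call:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct entry {
	bool marked_for_delete;
	struct list_head list;
};

int hw_remove(struct entry *e);	/* hypothetical slow firmware call */

static void sync_entries(spinlock_t *lock, struct list_head *live)
{
	struct entry *e, *tmp;
	LIST_HEAD(to_del);

	/* Phase 1: under the lock, only unhook entries (cheap, no
	 * failure path), so the lock hold time stays bounded.
	 */
	spin_lock_bh(lock);
	list_for_each_entry_safe(e, tmp, live, list) {
		if (e->marked_for_delete)
			list_move_tail(&e->list, &to_del);
	}
	spin_unlock_bh(lock);

	/* Phase 2: talk to the hardware without holding the lock. */
	list_for_each_entry_safe(e, tmp, &to_del, list) {
		hw_remove(e);
		list_del(&e->list);
		kfree(e);
	}
}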
{ bool cur_promisc; + cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); - aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) - dev_info(&pf->pdev->dev, - "set uni promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); - aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) + if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { + /* set defport ON for Main VSI instead of true promisc + * this way we will get all unicast/multicast and VLAN + * promisc behavior but will not get VF or VMDq traffic + * replicated on the Main VSI. + */ + if (pf->cur_promisc != cur_promisc) { + pf->cur_promisc = cur_promisc; + if (grab_rtnl) + i40e_do_reset_safe(pf, + BIT(__I40E_PF_RESET_REQUESTED)); + else + i40e_do_reset(pf, + BIT(__I40E_PF_RESET_REQUESTED)); + } + } else { + ret = i40e_aq_set_vsi_unicast_promiscuous( + &vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) + dev_info(&pf->pdev->dev, + "set unicast promisc failed, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + ret = i40e_aq_set_vsi_multicast_promiscuous( + &vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) + dev_info(&pf->pdev->dev, + "set multicast promisc failed, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + } + ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) dev_info(&pf->pdev->dev, - "set brdcast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set brdcast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } clear_bit(__I40E_CONFIG_BUSY, &vsi->state); @@ -1915,7 +2167,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) - i40e_sync_vsi_filters(pf->vsi[v]); + i40e_sync_vsi_filters(pf->vsi[v], true); } } @@ -1989,8 +2241,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vlan stripping failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); } } @@ -2018,8 +2272,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vlan stripping failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); } } @@ -2052,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(vsi->netdev); + /* Locked once because all functions invoked below iterates list*/ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) { add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, is_vf, is_netdev); @@ -2059,6 +2318,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; 
} } @@ -2069,6 +2329,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2090,6 +2351,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add filter 0 for %pM\n", vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2098,27 +2360,33 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, &vsi->mac_filter_list, list) { - if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev)) { - i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); - add_f = i40e_add_filter(vsi, f->macaddr, - 0, is_vf, is_netdev); - if (!add_f) { - dev_info(&vsi->back->pdev->dev, - "Could not add filter 0 for %pM\n", - f->macaddr); - return -ENOMEM; - } + if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev)) + continue; + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, + 0, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filter because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; - return i40e_sync_vsi_filters(vsi); + return i40e_sync_vsi_filters(vsi, false); } /** @@ -2138,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(netdev); + /* Locked once because all functions invoked below iterate the list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); @@ -2168,6 +2439,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2176,21 +2448,27 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) list_for_each_entry(f, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); + is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filter because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; - return i40e_sync_vsi_filters(vsi); + return i40e_sync_vsi_filters(vsi, false); } /** @@ -2289,7 +2567,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; - i40e_status aq_ret; + i40e_status ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); @@ -2299,11 +2577,13 @@ int
i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) ctxt.seid = vsi->seid; ctxt.info = vsi->info; - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "add pvid failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); return -ENOENT; } @@ -2522,8 +2802,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); - clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); - /* cache tail off for easier writes later */ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); @@ -2585,7 +2863,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* this controls whether VLAN is stripped from inner headers */ + rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif @@ -2691,9 +2970,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) #endif /* I40E_FCOE */ /* round up for the chip's needs */ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, - (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, - (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) @@ -2723,7 +3002,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { - if (!(vsi->tc_config.enabled_tc & (1 << n))) + if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) continue; qoffset = vsi->tc_config.tc_info[n].qoffset; @@ -2794,11 +3073,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi) static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - struct i40e_q_vector *q_vector; struct i40e_hw *hw = &pf->hw; u16 vector; int i, q; - u32 val; u32 qp; /* The interrupt indexing is offset by 1 in the PFINT_ITRn @@ -2808,7 +3085,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) qp = vsi->base_queue; vector = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - q_vector = vsi->q_vectors[i]; + struct i40e_q_vector *q_vector = vsi->q_vectors[i]; + + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), @@ -2817,10 +3096,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + wr32(hw, I40E_PFINT_RATEN(vector - 1), + INTRL_USEC_TO_REG(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { + u32 val; + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | @@ -2872,6 +3155,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + if (pf->flags & I40E_FLAG_IWARP_ENABLED) + val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + if (pf->flags & I40E_FLAG_PTP) val |= 
I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; @@ -2897,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) u32 val; /* set the ITR configuration */ + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); @@ -2955,24 +3242,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) } /** - * i40e_irq_dynamic_enable - Enable default interrupt generation settings - * @vsi: pointer to a vsi - * @vector: enable a particular Hw Interrupt vector - **/ -void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 val; - - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - /* skip the flush */ -} - -/** * i40e_irq_dynamic_disable - Disable default interrupt generation settings * @vsi: pointer to a vsi * @vector: disable a particular Hw Interrupt vector @@ -3000,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -3045,8 +3314,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector); if (err) { dev_info(&pf->pdev->dev, - "%s: request_irq failed, error: %d\n", - __func__, err); + "MSIX request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* assign the mask for this irq */ @@ -3111,8 +3379,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - for (i = vsi->base_vector; - i < (vsi->num_q_vectors + vsi->base_vector); i++) + for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { i40e_irq_dynamic_enable_icr0(pf); @@ -3162,11 +3429,21 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; + if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && + (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n"); + } + /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* temporarily disable queue cause for NAPI processing */ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); + qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_RQCTL(0), qval); @@ -3175,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data) wr32(hw, I40E_QINT_TQCTL(0), qval); if (!test_bit(__I40E_DOWN, &pf->state)) - napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); + napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { @@ -3336,10 +3613,9 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) i += tx_ring->count; tx_ring->next_to_clean = i; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { - i40e_irq_dynamic_enable(vsi, - tx_ring->q_vector->v_idx + vsi->base_vector); - } + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) + i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); + return budget > 0; } @@ -3368,7 +3644,7 @@ static irqreturn_t 
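Both interrupt paths above switch from napi_schedule() to napi_schedule_irqoff(). The _irqoff variant is for callers that already run with hardware interrupts disabled, as these hard-IRQ handlers do, and skips a redundant local_irq_save()/restore() pair. A minimal MSI-X style handler, assuming a driver-private q_vector with an embedded napi_struct (names here are illustrative, not the i40e ones):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
	/* rings, stats, ... */
};

static irqreturn_t my_msix_handler(int irq, void *data)
{
	struct my_q_vector *q = data;

	/* IRQs are off in a hard interrupt handler, so the cheaper
	 * variant is safe; napi_schedule() would also work but pays
	 * for an extra local_irq_save()/restore().
	 */
	napi_schedule_irqoff(&q->napi);
	return IRQ_HANDLED;
}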
i40e_fdir_clean_ring(int irq, void *data) * @v_idx: vector index * @qp_idx: queue pair index **/ -static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) +static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; @@ -3422,7 +3698,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) q_vector->tx.ring = NULL; while (num_ringpairs--) { - map_vector_to_qp(vsi, v_start, qp_idx); + i40e_map_vector_to_qp(vsi, v_start, qp_idx); qp_idx++; qp_remaining--; } @@ -3477,14 +3753,12 @@ static void i40e_netpoll(struct net_device *netdev) if (test_bit(__I40E_DOWN, &vsi->state)) return; - pf->flags |= I40E_FLAG_IN_NETPOLL; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } - pf->flags &= ~I40E_FLAG_IN_NETPOLL; } #endif @@ -3565,9 +3839,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) ret = i40e_pf_txq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, - "%s: VSI seid %d Tx ring %d %sable timeout\n", - __func__, vsi->seid, pf_q, - (enable ? "en" : "dis")); + "VSI seid %d Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); break; } } @@ -3643,9 +3916,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, - "%s: VSI seid %d Rx ring %d %sable timeout\n", - __func__, vsi->seid, pf_q, - (enable ? "en" : "dis")); + "VSI seid %d Rx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); break; } } @@ -3924,6 +4196,7 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); + vsi->current_netdev_flags = 0; } /** @@ -3939,17 +4212,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && vsi->type == I40E_VSI_FCOE) { dev_dbg(&vsi->back->pdev->dev, - "%s: VSI seid %d skipping FCoE VSI disable\n", - __func__, vsi->seid); + "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); return; } set_bit(__I40E_NEEDS_RESTART, &vsi->state); - if (vsi->netdev && netif_running(vsi->netdev)) { + if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - } else { + else i40e_vsi_close(vsi); - } } /** @@ -4014,8 +4285,8 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) ret = i40e_pf_txq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, - "%s: VSI seid %d Tx ring %d disable timeout\n", - __func__, vsi->seid, pf_q); + "VSI seid %d Tx ring %d disable timeout\n", + vsi->seid, pf_q); return ret; } } @@ -4047,6 +4318,108 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) } #endif + +/** + * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue + * @q_idx: TX queue number + * @vsi: Pointer to VSI struct + * + * This function checks the specified queue of the given VSI and detects a + * hung condition. Setting the hung bit is a two-step process: before the + * next run of the service task, napi_poll may run and reset the 'hung' bit + * for the respective q_vector. If not, the hung condition remains unchanged + * and during the subsequent run this function issues a SW interrupt to + * recover from it. + **/ +static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring = NULL; + struct i40e_pf *pf; + u32 head, val, tx_pending; + int i; + + pf = vsi->back; + + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (q_idx == vsi->tx_rings[i]->queue_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + + if (!tx_ring) + return; + + /* Read interrupt register */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + val = rd32(&pf->hw, + I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + else + val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + + head = i40e_get_head(tx_ring); + + tx_pending = i40e_get_tx_pending(tx_ring); + + /* Interrupts are disabled and TX pending is non-zero, + * trigger the SW interrupt (don't wait). Worst case + * there will be one extra interrupt, which finds the + * queues already clean. + */ + if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) + i40e_force_wb(vsi, tx_ring->q_vector); +} + +/** + * i40e_detect_recover_hung - Function to detect and recover hung_queues + * @pf: pointer to PF struct + * + * The LAN VSI has a netdev and the netdev has TX queues. This function + * checks each of those TX queues for a hung condition and triggers recovery + * by issuing a SW interrupt. + **/ +static void i40e_detect_recover_hung(struct i40e_pf *pf) +{ + struct net_device *netdev; + struct i40e_vsi *vsi; + int i; + + /* Only for LAN VSI */ + vsi = pf->vsi[pf->lan_vsi]; + + if (!vsi) + return; + + /* Make sure VSI state is not DOWN/RECOVERY_PENDING */ + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return; + + /* Make sure type is MAIN VSI */ + if (vsi->type != I40E_VSI_MAIN) + return; + + netdev = vsi->netdev; + if (!netdev) + return; + + /* Bail out if netif_carrier is not OK */ + if (!netif_carrier_ok(netdev)) + return; + + /* Go through the netdev's TX queues */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + + q = netdev_get_tx_queue(netdev, i); + if (q) + i40e_detect_recover_hung_queue(i, vsi); + } } + /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP * @pf: pointer to PF @@ -4068,7 +4441,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= (1 << tc); + enabled_tc |= BIT_ULL(tc); break; } } @@ -4117,7 +4490,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) u8 i; for (i = 0; i < num_tc; i++) - enabled_tc |= 1 << i; + enabled_tc |= BIT(i); return enabled_tc; } @@ -4152,7 +4525,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* At least have TC0 */ enabled_tc = (enabled_tc ?
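i40e_detect_recover_hung_queue() above fires a software interrupt only when two conditions coincide: descriptors are still pending on the ring and the vector's interrupt-enable bit reads back clear. In sketch form, with placeholders for the driver's register read and helpers (read_dyn_ctl, get_tx_pending, force_writeback stand in for rd32(), i40e_get_tx_pending(), i40e_force_wb()):

#include <stdbool.h>
#include <stdint.h>

#define INTENA_MASK 0x1u  /* illustrative; the driver uses
                           * I40E_PFINT_DYN_CTLN_INTENA_MASK */

uint32_t read_dyn_ctl(int vector);   /* placeholder register read     */
uint32_t get_tx_pending(int queue);  /* placeholder pending-desc count */
void force_writeback(int queue);     /* placeholder SW-interrupt kick  */

void check_queue(int queue, int vector)
{
	uint32_t val = read_dyn_ctl(vector);

	/* Work is queued but the interrupt is masked: nothing will
	 * clean the ring on its own, so kick a SW interrupt. Worst
	 * case is one spurious interrupt that finds the queues clean.
	 */
	if (get_tx_pending(queue) && !(val & INTENA_MASK))
		force_writeback(queue);
}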
enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) num_tc++; } return num_tc; @@ -4174,11 +4547,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) break; } - return 1 << i; + return BIT(i); } /** @@ -4216,26 +4589,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status aq_ret; + i40e_status ret; u32 tc_bw_max; int i; /* Get the VSI level BW configuration */ - aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); - if (aq_ret) { + ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi bw config, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } /* Get the VSI level BW configuration per TC */ - aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, - NULL); - if (aq_ret) { + ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, + NULL); + if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi ets bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -4274,16 +4649,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; - i40e_status aq_ret; + i40e_status ret; int i; bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; - aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, - NULL); - if (aq_ret) { + ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, + NULL); + if (ret) { dev_info(&vsi->back->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", vsi->back->hw.aq.asq_last_status); @@ -4332,7 +4707,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. 
*/ - if (vsi->tc_config.enabled_tc & (1 << i)) + if (vsi->tc_config.enabled_tc & BIT_ULL(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, @@ -4394,7 +4769,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) bw_share[i] = 1; } @@ -4418,8 +4793,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "update vsi failed, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "Update vsi tc config failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ @@ -4430,8 +4807,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, - "Failed updating vsi bw info, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "Failed updating vsi bw info, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); goto out; } @@ -4464,7 +4843,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) bw_data.tc_bw_share_credits[i] = 1; } @@ -4472,8 +4851,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "veb bw config failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "VEB bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } @@ -4481,8 +4861,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "Failed getting veb bw config, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "Failed getting veb bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: @@ -4569,8 +4950,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "AQ command Resume Port Tx failed = %d\n", - pf->hw.aq.asq_last_status); + "Resume Port Tx failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); @@ -4622,8 +5004,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) } } else { dev_info(&pf->pdev->dev, - "AQ Querying DCB configuration failed: aq_err %d\n", - pf->hw.aq.asq_last_status); + "Query for DCB configuration failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: @@ -4636,11 +5019,14 @@ out: * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message */ -static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { - char speed[SPEED_SIZE] = "Unknown"; - char fc[FC_SIZE] = "RX/TX"; + char *speed = "Unknown"; + 
char *fc = "Unknown"; + if (vsi->current_isup == isup) + return; + vsi->current_isup = isup; if (!isup) { netdev_info(vsi->netdev, "NIC Link is Down\n"); return; @@ -4657,19 +5043,19 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: - strlcpy(speed, "40 Gbps", SPEED_SIZE); + speed = "40 G"; break; case I40E_LINK_SPEED_20GB: - strncpy(speed, "20 Gbps", SPEED_SIZE); + speed = "20 G"; break; case I40E_LINK_SPEED_10GB: - strlcpy(speed, "10 Gbps", SPEED_SIZE); + speed = "10 G"; break; case I40E_LINK_SPEED_1GB: - strlcpy(speed, "1000 Mbps", SPEED_SIZE); + speed = "1000 M"; break; case I40E_LINK_SPEED_100MB: - strncpy(speed, "100 Mbps", SPEED_SIZE); + speed = "100 M"; break; default: break; @@ -4677,20 +5063,20 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.fc.current_mode) { case I40E_FC_FULL: - strlcpy(fc, "RX/TX", FC_SIZE); + fc = "RX/TX"; break; case I40E_FC_TX_PAUSE: - strlcpy(fc, "TX", FC_SIZE); + fc = "TX"; break; case I40E_FC_RX_PAUSE: - strlcpy(fc, "RX", FC_SIZE); + fc = "RX"; break; default: - strlcpy(fc, "None", FC_SIZE); + fc = "None"; break; } - netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", + netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", speed, fc); } @@ -4739,7 +5125,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi) pf->fd_add_err = pf->fd_atr_cnt = 0; if (pf->fd_tcp_rule > 0) { pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); pf->fd_tcp_rule = 0; } i40e_fdir_filter_restore(vsi); @@ -4853,7 +5240,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) - enabled_tc |= (1 << i); + enabled_tc |= BIT_ULL(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc) @@ -4992,7 +5379,7 @@ err_setup_rx: err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return err; } @@ -5060,7 +5447,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) i40e_vc_notify_reset(pf); /* do the biggest reset indicated */ - if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { + if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { /* Request a Global Reset * @@ -5075,7 +5462,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { /* Request a Core Reset * @@ -5087,7 +5474,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset * @@ -5100,7 +5487,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf); - } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int 
v; /* Find the VSI(s) that requested a re-init */ @@ -5108,22 +5495,21 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) "VSI reinit requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { i40e_vsi_reinit_locked(pf->vsi[v]); clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); } } - - /* no further action needed, so return now */ - return; - } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; /* Find the VSI(s) that needs to be brought down */ dev_info(&pf->pdev->dev, "VSI down requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { set_bit(__I40E_DOWN, &vsi->state); @@ -5131,13 +5517,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); } } - - /* no further action needed, so return now */ - return; } else { dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); - return; } } @@ -5193,8 +5575,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } - dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, - need_reconfig); + dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); return need_reconfig; } @@ -5221,16 +5602,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); - dev_dbg(&pf->pdev->dev, - "%s: LLDP event mib bridge type 0x%x\n", __func__, type); + dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; dev_dbg(&pf->pdev->dev, - "%s: LLDP event mib type %s\n", __func__, - type ? "remote" : "local"); + "LLDP event mib type %s\n", type ? 
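The reinit/down branches above drop their trailing early `return`s (the if/else-if chain already guarantees only one branch runs) and keep the usual request-bit handshake: some other path sets a bit on the VSI, and the reset handler consumes it with `test_bit()`/`clear_bit()`. The pattern in isolation, with made-up names:

```c
#include <linux/bitops.h>

struct my_vsi { unsigned long state; };
enum { MY_REINIT_REQUESTED };	/* illustrative bit number */

static void consume_reinit_requests(struct my_vsi **vsi, int n)
{
	int v;

	for (v = 0; v < n; v++) {
		if (!vsi[v] ||
		    !test_bit(MY_REINIT_REQUESTED, &vsi[v]->state))
			continue;
		/* ... re-init this VSI here ... */
		clear_bit(MY_REINIT_REQUESTED, &vsi[v]->state);
	}
}
```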
"remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, @@ -5247,7 +5626,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); + dev_info(&pf->pdev->dev, + "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto exit; } @@ -5412,7 +5794,9 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) **/ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) { + struct i40e_fdir_filter *filter; u32 fcnt_prog, fcnt_avail; + struct hlist_node *node; if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) return; @@ -5428,7 +5812,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; - dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } } /* Wait for some more space to be available to turn on ATR */ @@ -5436,7 +5821,20 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + } + } + + /* if hw had a problem adding a filter, delete it */ + if (pf->fd_inv > 0) { + hlist_for_each_entry_safe(filter, node, + &pf->fdir_filter_list, fdir_node) { + if (filter->fd_id == pf->fd_inv) { + hlist_del(&filter->fdir_node); + kfree(filter); + pf->fdir_pf_active_filters--; + } } } } @@ -5458,47 +5856,51 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; - if (time_after(jiffies, pf->fd_flush_timestamp + - (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { - /* If the flush is happening too quick and we have mostly - * SB rules we should not re-enable ATR for some time. - */ - min_flush_time = pf->fd_flush_timestamp - + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); - fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + if (!time_after(jiffies, pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) + return; + + /* If the flush is happening too quick and we have mostly SB rules we + * should not re-enable ATR for some time. 
+ */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; - if (!(time_after(jiffies, min_flush_time)) && - (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); - disable_atr = true; - } + disable_atr = true; + } - pf->fd_flush_timestamp = jiffies; - pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - /* flush all filters */ - wr32(&pf->hw, I40E_PFQF_CTL_1, - I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); - i40e_flush(&pf->hw); - pf->fd_flush_cnt++; - pf->fd_add_err = 0; - do { - /* Check FD flush status every 5-6msec */ - usleep_range(5000, 6000); - reg = rd32(&pf->hw, I40E_PFQF_CTL_1); - if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) - break; - } while (flush_wait_retry--); - if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { - dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); - } else { - /* replay sideband filters */ - i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); - if (!disable_atr) - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->fd_flush_timestamp = jiffies; + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + /* flush all filters */ + wr32(&pf->hw, I40E_PFQF_CTL_1, + I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + i40e_flush(&pf->hw); + pf->fd_flush_cnt++; + pf->fd_add_err = 0; + do { + /* Check FD flush status every 5-6msec */ + usleep_range(5000, 6000); + reg = rd32(&pf->hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { + dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); + } else { + /* replay sideband filters */ + i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); - } } + } /** @@ -5606,15 +6008,23 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) **/ static void i40e_link_event(struct i40e_pf *pf) { - bool new_link, old_link; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 new_link_speed, old_link_speed; + i40e_status status; + bool new_link, old_link; /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); - new_link = i40e_get_link_status(&pf->hw); + + status = i40e_get_link_status(&pf->hw, &new_link); + if (status) { + dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", + status); + return; + } + old_link_speed = pf->hw.phy.link_info_old.link_speed; new_link_speed = pf->hw.phy.link_info.link_speed; @@ -5643,68 +6053,6 @@ static void i40e_link_event(struct i40e_pf *pf) } /** - * i40e_check_hang_subtask - Check for hung queues and dropped interrupts - * @pf: board private structure - * - * Set the per-queue flags to request a check for stuck queues in the irq - * clean functions, then force interrupts to be sure the irq clean is called. 
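The large rework of `i40e_fdir_flush_and_replay()` above is mostly an indentation change: the `time_after(...)` condition is inverted into an early return, so the flush logic that used to nest inside one big `if` now reads at the top level. The shape of the refactor, reduced to a skeleton with illustrative names:

```c
#include <linux/jiffies.h>

#define MY_MIN_FLUSH_INTERVAL 10	/* seconds; illustrative */
struct my_pf { unsigned long fd_flush_timestamp; };

static void flush_and_replay(struct my_pf *pf)
{
	/* Before: if (time_after(...)) { <fifty indented lines> }
	 * After: invert the test, bail out early, body flattens. */
	if (!time_after(jiffies, pf->fd_flush_timestamp +
			(MY_MIN_FLUSH_INTERVAL * HZ)))
		return;		/* too soon since the last flush */

	/* ...flush the FD table, then replay the sideband filters... */
}
```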
- **/ -static void i40e_check_hang_subtask(struct i40e_pf *pf) -{ - int i, v; - - /* If we're down or resetting, just bail */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) - return; - - /* for each VSI/netdev - * for each Tx queue - * set the check flag - * for each q_vector - * force an interrupt - */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - int armed = 0; - - if (!pf->vsi[v] || - test_bit(__I40E_DOWN, &vsi->state) || - (vsi->netdev && !netif_carrier_ok(vsi->netdev))) - continue; - - for (i = 0; i < vsi->num_queue_pairs; i++) { - set_check_for_tx_hang(vsi->tx_rings[i]); - if (test_bit(__I40E_HANG_CHECK_ARMED, - &vsi->tx_rings[i]->state)) - armed++; - } - - if (armed) { - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { - wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, - (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | - I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | - I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); - } else { - u16 vec = vsi->base_vector - 1; - u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | - I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | - I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); - for (i = 0; i < vsi->num_q_vectors; i++, vec++) - wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(vec), val); - } - i40e_flush(&vsi->back->hw); - } - } -} - -/** * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ @@ -5723,8 +6071,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - i40e_check_hang_subtask(pf); - i40e_link_event(pf); + if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) + i40e_link_event(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to @@ -5733,10 +6081,12 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); - /* Update the stats for the active switching components */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i]) - i40e_update_veb_stats(pf->veb[i]); + if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { + /* Update the stats for the active switching components */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i]) + i40e_update_veb_stats(pf->veb[i]); + } i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); } @@ -5751,23 +6101,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf) rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_REINIT_REQUESTED); + reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_DOWN_REQUESTED); + reset_flags |= 
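Two things change in the periodic path above: the hang-check subtask is deleted outright (its job moves to the new `i40e_detect_recover_hung()` call in the service task, visible further down), and `i40e_get_link_status()` grows an error return, passing the link state back through a pointer so `i40e_link_event()` can tell "link is down" apart from "query failed". A sketch of that API shape, with hypothetical names:

```c
struct my_hw { u32 link_info; };
#define MY_LINK_UP 0x01

/* Status and data travel separately instead of overloading one value. */
static int get_link(struct my_hw *hw, bool *link_up)
{
	int err = 0;	/* stands in for the firmware query result */

	if (err)
		return err;	/* caller logs the failure and bails */

	*link_up = !!(hw->link_info & MY_LINK_UP);
	return 0;
}
```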
BIT_ULL(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } @@ -5973,27 +6323,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; - int aq_ret; + int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s couldn't get PF vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vsi switch failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -6007,27 +6359,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; - int aq_ret; + int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s couldn't get PF vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vsi switch failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -6043,8 +6397,9 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; - dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", - veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (pf->hw.debug_mask & I40E_DEBUG_LAN) + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
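From here on, nearly every `dev_info()` swaps raw numeric codes for `i40e_stat_str()`/`i40e_aq_str()`, which decode the driver status and the AdminQ `asq_last_status` into readable names. A decoder of that shape is typically a string lookup table; a sketch (the driver's real tables live in its common code, not shown in this diff):

```c
static const char * const aq_err_names[] = {
	[0] = "OK",
	[1] = "EPERM",
	/* ... one entry per AdminQ return code ... */
};

static const char *my_aq_str(unsigned int err)
{
	if (err < ARRAY_SIZE(aq_err_names) && aq_err_names[err])
		return aq_err_names[err];
	return "unknown";
}
```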
"VEPA" : "VEB"); if (veb->bridge_mode & BRIDGE_MODE_VEPA) i40e_disable_pf_switch_lb(pf); else @@ -6087,7 +6442,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) ret = i40e_add_vsi(ctl_vsi); if (ret) { dev_info(&pf->pdev->dev, - "rebuild of owner VSI failed: %d\n", ret); + "rebuild of veb_idx %d owner VSI failed: %d\n", + veb->idx, ret); goto end_reconstitute; } i40e_vsi_reset_stats(ctl_vsi); @@ -6110,6 +6466,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (pf->vsi[v]->veb_idx == veb->idx) { struct i40e_vsi *vsi = pf->vsi[v]; + vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { @@ -6166,18 +6523,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf) buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { dev_info(&pf->pdev->dev, - "capability discovery failed: aq=%d\n", - pf->hw.aq.asq_last_status); + "capability discovery failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); - if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || - (pf->hw.aq.fw_maj_ver < 2)) { - pf->hw.func_caps.num_msix_vectors++; - pf->hw.func_caps.num_msix_vectors_vf++; - } - if (pf->hw.debug_mask & I40E_DEBUG_USER) dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", @@ -6353,7 +6706,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto clear_recovery; } @@ -6363,11 +6718,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); - if (ret) { - dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", - ret); + if (ret) goto end_core_reset; - } ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, @@ -6391,9 +6743,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } #endif /* CONFIG_I40E_DCB */ #ifdef I40E_FCOE - ret = i40e_init_pf_fcoe(pf); - if (ret) - dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret); + i40e_init_pf_fcoe(pf); #endif /* do basic switch setup */ @@ -6408,12 +6758,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (ret) - dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) - dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. 
* They are still in our local switch element arrays, so only @@ -6474,13 +6828,24 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. + */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); @@ -6637,8 +7002,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->pending_vxlan_bitmap & (1 << i)) { - pf->pending_vxlan_bitmap &= ~(1 << i); + if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { + pf->pending_vxlan_bitmap &= ~BIT_ULL(i); port = pf->vxlan_ports[i]; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), @@ -6649,10 +7014,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) if (ret) { dev_info(&pf->pdev->dev, - "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + "%s vxlan port %d, index %d failed, err %s aq_err %s\n", port ? "add" : "delete", - ntohs(port), i, ret, - pf->hw.aq.asq_last_status); + ntohs(port), i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->vxlan_ports[i] = 0; } } @@ -6677,6 +7044,7 @@ static void i40e_service_task(struct work_struct *work) return; } + i40e_detect_recover_hung(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); @@ -6860,6 +7228,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 
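The VXLAN sync subtask above is a producer/consumer split: the `ndo_add/del_vxlan_port` callbacks run where the sleeping AdminQ call is unwelcome, so they only record the port and set a bit in `pending_vxlan_bitmap`; the service task later walks the bitmap and issues the actual tunnel add/delete. The two chatty `dev_info()` lines are dropped from add/del because the outcome is already reported when the AQ call completes. The split, condensed from the hunks:

```c
/* ndo callback: just mark the slot pending. */
pf->vxlan_ports[idx] = port;
pf->pending_vxlan_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;

/* service task: consume pending slots and talk to the firmware. */
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
	if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
		pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
		/* i40e_aq_add_udp_tunnel() / _del_ goes here, as above */
	}
}
```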
pf->rss_table_size : 64; vsi->netdev_registered = false; @@ -6878,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); + /* Initialize VSI lock */ + spin_lock_init(&vsi->mac_filter_list_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; goto unlock_pf; @@ -7003,6 +7374,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->count = vsi->num_desc; tx_ring->size = 0; tx_ring->dcb_tc = 0; + if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; + if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -7401,62 +7776,141 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) } /** - * i40e_config_rss - Prepare for RSS if used + * i40e_config_rss_aq - Prepare for RSS using AQ commands + * @vsi: vsi structure + * @seed: RSS hash seed + **/ +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +{ + struct i40e_aqc_get_set_rss_key_data rss_key; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + bool pf_lut = false; + u8 *rss_lut; + int ret, i; + + memset(&rss_key, 0, sizeof(rss_key)); + memcpy(&rss_key, seed, sizeof(rss_key)); + + rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); + if (!rss_lut) + return -ENOMEM; + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0; i < vsi->rss_table_size; i++) + rss_lut[i] = i % vsi->rss_size; + + ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + goto config_rss_aq_out; + } + + if (vsi->type == I40E_VSI_MAIN) + pf_lut = true; + + ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, + vsi->rss_table_size); + if (ret) + dev_info(&pf->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + +config_rss_aq_out: + kfree(rss_lut); + return ret; +} + +/** + * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used + * @vsi: VSI structure + **/ +static int i40e_vsi_config_rss(struct i40e_vsi *vsi) +{ + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_pf *pf = vsi->back; + + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(vsi, seed); + + return 0; +} + +/** + * i40e_config_rss_reg - Prepare for RSS if used * @pf: board private structure + * @seed: RSS hash seed **/ -static int i40e_config_rss(struct i40e_pf *pf) +static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) { - u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; + u32 *seed_dw = (u32 *)seed; + u32 current_queue = 0; u32 lut = 0; int i, j; - u64 hena; - u32 reg_val; - netdev_rss_key_fill(rss_key, sizeof(rss_key)); + /* Fill out hash function seed */ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + lut = 0; + for (j = 0; j < 4; j++) { + if (current_queue == vsi->rss_size) + current_queue = 0; + lut |= ((current_queue) << (8 * j)); + current_queue++; + } + 
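Both new RSS paths — `i40e_config_rss_aq()` above and the register path that follows — fill the lookup table the same way: entry *i* maps to queue *i mod rss_size*, spreading hash buckets evenly across the active queues. Isolated:

```c
/* Round-robin LUT: with rss_size = 4 and a 64-entry table the
 * entries read 0,1,2,3,0,1,2,3,... */
static void fill_rss_lut(u8 *lut, unsigned int lut_size,
			 unsigned int rss_size)
{
	unsigned int i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}
```

The register path then packs four of these one-byte entries into each 32-bit `I40E_PFQF_HLUT` register, which is what the `8 * j` shifting in the loop above implements.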
wr32(&pf->hw, I40E_PFQF_HLUT(i), lut); + } + i40e_flush(hw); + + return 0; +} + +/** + * i40e_config_rss - Prepare for RSS if used + * @pf: board private structure + **/ +static int i40e_config_rss(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + u64 hena; + + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); - hena |= I40E_DEFAULT_RSS_HENA; + hena |= i40e_pf_get_default_rss_hena(pf); + wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); - /* Check capability and Set table size and register per hw expectation*/ + /* Determine the RSS table size based on the hardware capabilities */ reg_val = rd32(hw, I40E_PFQF_CTL_0); - if (pf->rss_table_size == 512) - reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; - else - reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; + reg_val = (pf->rss_table_size == 512) ? + (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : + (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); wr32(hw, I40E_PFQF_CTL_0, reg_val); - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { - - /* The assumption is that lan qp count will be the highest - * qp count for any PF VSI that needs RSS. - * If multiple VSIs need RSS support, all the qp counts - * for those VSIs should be a power of 2 for RSS to work. - * If LAN VSI is the only consumer for RSS then this requirement - * is not necessary. - */ - if (j == vsi->rss_size) - j = 0; - /* lut = 4-byte sliding window of 4 lut entries */ - lut = (lut << 8) | (j & - ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); - /* On i = 3, we have 4 entries in lut; write to the register */ - if ((i & 3) == 3) - wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); - } - i40e_flush(hw); - - return 0; + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); + else + return i40e_config_rss_reg(pf, seed); } /** @@ -7523,7 +7977,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) i40e_status status; /* Set the valid bit for this PF */ - bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; @@ -7557,8 +8011,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %d: aq_err %d\n", - ret, last_aq_status); + "Cannot acquire NVM for read access, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -7573,8 +8028,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", - ret, last_aq_status); + dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -7586,8 +8042,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; 
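The NPAR bandwidth commit around here follows the firmware's NVM access protocol: every read or write is bracketed by an acquire/release of the NVM resource, and `asq_last_status` is captured into `last_aq_status` before the release so a later AQ call cannot overwrite the code being reported. Skeleton of one bracketed access (error handling trimmed; `read_nvm_words()` is a hypothetical stand-in for the AQ read):

```c
ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
if (ret)
	goto bw_commit_out;

ret = read_nvm_words(&pf->hw, offset, len, buf);	/* hypothetical */
last_aq_status = pf->hw.aq.asq_last_status;	/* capture before release */
i40e_release_nvm(&pf->hw);
if (ret)
	/* report via i40e_stat_str(ret) / i40e_aq_str(last_aq_status) */
	goto bw_commit_out;
```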
if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %d: aq_err %d\n", - ret, last_aq_status); + "Cannot acquire NVM for write access, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Write it back out unchanged to initiate update NVM, @@ -7605,8 +8062,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %d aq_err %d\n", - ret, last_aq_status); + "BW settings NOT SAVED, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: return ret; @@ -7638,6 +8096,7 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | + I40E_FLAG_LINK_POLLING_ENABLED | I40E_FLAG_MSIX_ENABLED; if (iommu_present(&pci_bus_type)) @@ -7652,7 +8111,7 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ - pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; + pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); pf->rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, @@ -7663,7 +8122,7 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* MFP mode enabled */ - if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_npar_bw_setting(pf)) @@ -7680,16 +8139,12 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - /* Setup a counter for fd_atr per PF */ - pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - /* Setup a counter for fd_sb per PF */ - pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); - } else { + if (pf->flags & I40E_FLAG_MFP_ENABLED && + pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); - } + else + pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = @@ -7697,15 +8152,13 @@ static int i40e_sw_init(struct i40e_pf *pf) } if (pf->hw.func_caps.vmdq) { - pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; - pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; + pf->flags |= I40E_FLAG_VMDQ_ENABLED; + pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } #ifdef I40E_FCOE - err = i40e_init_pf_fcoe(pf); - if (err) - dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err); + i40e_init_pf_fcoe(pf); #endif /* I40E_FCOE */ #ifdef CONFIG_PCI_IOV @@ -7717,10 +8170,21 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ + if (pf->hw.mac.type == I40E_MAC_X722) { + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | + I40E_FLAG_128_QP_RSS_CAPABLE | + I40E_FLAG_HW_ATR_EVICT_CAPABLE | + I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | + I40E_FLAG_WB_ON_ITR_CAPABLE | + I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; + } pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; + /* By 
default FW has this off for performance reasons */ + pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); @@ -7775,7 +8239,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; pf->fdir_pf_active_filters = 0; pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); /* if ATR was auto disabled it can be re-enabled. */ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) @@ -7805,7 +8270,7 @@ static int i40e_set_features(struct net_device *netdev, need_reset = i40e_set_ntuple(pf, features); if (need_reset) - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return 0; } @@ -7868,10 +8333,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->vxlan_ports[next_idx] = port; - pf->pending_vxlan_bitmap |= (1 << next_idx); + pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); } /** @@ -7899,11 +8362,8 @@ static void i40e_del_vxlan_port(struct net_device *netdev, * and make it pending */ pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= (1 << idx); + pf->pending_vxlan_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", - ntohs(port)); } else { netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); @@ -7974,7 +8434,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -#ifdef HAVE_BRIDGE_ATTRIBS /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured @@ -7988,7 +8447,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * bridge mode enabled. **/ static int i40e_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + u16 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8055,18 +8515,15 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, * @seq: RTNL message seq # * @dev: the netdev being configured * @filter_mask: unused + * @nlflags: netlink flags passed in * * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. 
**/ -#ifdef HAVE_BRIDGE_FILTER static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 __always_unused filter_mask, int nlflags) -#else -static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, int nlflags) -#endif /* HAVE_BRIDGE_FILTER */ + u32 __always_unused filter_mask, + int nlflags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8088,9 +8545,27 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, - nlflags); + nlflags, 0, 0, filter_mask, NULL); +} + +#define I40E_MAX_TUNNEL_HDR_LEN 80 +/** + * i40e_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @dev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t i40e_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation && + (skb_inner_mac_header(skb) - skb_transport_header(skb) > + I40E_MAX_TUNNEL_HDR_LEN)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; } -#endif /* HAVE_BRIDGE_ATTRIBS */ static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, @@ -8126,10 +8601,9 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, -#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, -#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -8159,6 +8633,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | @@ -8166,6 +8641,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_SCTP_CSUM | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | @@ -8191,17 +8667,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * default a MAC-VLAN filter that accepts any tagged packet * which must be replaced by a normal filter. 
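`i40e_features_check()` is new in this kernel: the stack calls it per skb, and if the encapsulation headers are longer than the 80 bytes the hardware can parse, the driver strips the checksum and GSO feature bits so the stack falls back to software offloads for that packet. Condensed from the hunk:

```c
#include <linux/netdevice.h>

#define MAX_TUNNEL_HDR_LEN 80	/* mirrors I40E_MAX_TUNNEL_HDR_LEN */

static netdev_features_t features_check(struct sk_buff *skb,
					struct net_device *dev,
					netdev_features_t features)
{
	/* inner MAC minus transport header = encapsulation header size */
	if (skb->encapsulation &&
	    skb_inner_mac_header(skb) - skb_transport_header(skb) >
	    MAX_TUNNEL_HDR_LEN)
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
```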
*/ - if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); + } } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -8257,12 +8742,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) return 1; veb = pf->veb[vsi->veb_idx]; + if (!veb) { + dev_info(&pf->pdev->dev, + "There is no veb associated with the bridge\n"); + return -ENOENT; + } + /* Uplink is a bridge in VEPA mode */ - if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + if (veb->bridge_mode & BRIDGE_MODE_VEPA) { return 0; + } else { + /* Uplink is a bridge in VEB mode */ + return 1; + } - /* Uplink is a bridge in VEB mode */ - return 1; + /* VEPA is now default bridge, so return 0 */ + return 0; } /** @@ -8275,10 +8770,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; - struct i40e_mac_filter *f, *ftmp; + u8 laa_macaddr[ETH_ALEN]; + bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; + struct i40e_mac_filter *f, *ftmp; + u8 enabled_tc = 0x1; /* TC0 enabled */ int f_count = 0; @@ -8297,8 +8795,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); return -ENOENT; } vsi->info = ctxt.info; @@ -8320,8 +8820,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "update vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -8338,9 +8840,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { dev_info(&pf->pdev->dev, - "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", - enabled_tc, ret, - pf->hw.aq.asq_last_status); + "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", + enabled_tc, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; } } @@ -8431,8 +8935,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "add vsi failed, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "add vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -8442,32 +8948,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) vsi->id = ctxt.vsi_number; } + 
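The `spin_lock_bh()` pairs sprinkled through `i40e_config_netdev()` above guard the new `mac_filter_list_lock` (initialized in `i40e_vsi_mem_alloc()` earlier in this patch). The `_bh` variant disables bottom halves while the lock is held, presumably because the filter list can also be reached from bottom-half context; taking the plain variant there could deadlock against an interrupting BH on the same CPU. Usage is always the same three lines:

```c
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
```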
spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { f->changed = true; f_count++; + /* Expected to have only one MAC filter entry for LAA in list */ if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - struct i40e_aqc_remove_macvlan_element_data element; + ether_addr_copy(laa_macaddr, f->macaddr); + found_laa_mac_filter = true; + } + } + spin_unlock_bh(&vsi->mac_filter_list_lock); - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, f->macaddr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - ret = i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - if (ret) { - /* some older FW has a different default */ - element.flags |= - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - } + if (found_laa_mac_filter) { + struct i40e_aqc_remove_macvlan_element_data element; - i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - f->macaddr, NULL); + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, laa_macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); } + + i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + laa_macaddr, NULL); } + if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= I40E_FLAG_FILTER_SYNC; @@ -8477,8 +8992,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get vsi bw info, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't get vsi bw info, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } @@ -8529,10 +9045,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, f->is_vf, f->is_netdev); - i40e_sync_vsi_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + + i40e_sync_vsi_filters(vsi, false); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); @@ -8608,6 +9127,11 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) goto vector_setup_out; } + /* In Legacy mode, we do not have to get any other vector since we + * piggyback on the misc/ICR0 for queue interrupts. 
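The LAA rework in `i40e_add_vsi()` above is the classic copy-then-act pattern: AdminQ commands can sleep, and sleeping while holding a spinlock is forbidden, so the list walk only copies the LAA MAC under the lock and the `i40e_aq_remove_macvlan()`/`i40e_aq_mac_address_write()` calls run after the unlock. Reduced to a skeleton (`remove_and_rewrite_mac()` is a hypothetical stand-in for those two AQ calls):

```c
u8 laa[ETH_ALEN];
bool found = false;

spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry(f, &vsi->mac_filter_list, list) {
	if (f->is_laa) {
		ether_addr_copy(laa, f->macaddr);	/* copy, don't act */
		found = true;
		break;
	}
}
spin_unlock_bh(&vsi->mac_filter_list_lock);

if (found)
	remove_and_rewrite_mac(hw, laa);	/* AQ calls; may sleep */
```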
+ */ + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx); @@ -8651,7 +9175,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, - "failed to get tracking for %d queues for VSI %d err=%d\n", + "failed to get tracking for %d queues for VSI %d err %d\n", vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } @@ -8752,8 +9276,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, if (veb) { if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { dev_info(&vsi->back->pdev->dev, - "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", - __func__); + "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; } /* We come up by default in VEPA mode if SRIOV is not @@ -8850,6 +9373,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } + if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + (vsi->type == I40E_VSI_VMDQ2)) { + ret = i40e_vsi_config_rss(vsi); + } return vsi; err_rings: @@ -8889,8 +9416,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw config failed, aq_err=%d\n", - hw->aq.asq_last_status); + "query veb bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -8898,8 +9426,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw ets config failed, aq_err=%d\n", - hw->aq.asq_last_status); + "query veb bw ets config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -9083,36 +9612,40 @@ void i40e_veb_release(struct i40e_veb *veb) **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { - bool is_default = false; + struct i40e_pf *pf = veb->pf; + bool is_default = veb->pf->cur_promisc; bool is_cloud = false; int ret; /* get a VEB from the hardware */ - ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, + ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, is_cloud, &veb->seid, NULL); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't add VEB, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "couldn't add VEB, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } /* get statistics counter */ - ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, + ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, &veb->stats_idx, NULL, NULL, NULL); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't get VEB statistics idx, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "couldn't get VEB statistics idx, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't get VEB bw info, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); - i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); + dev_info(&pf->pdev->dev, + "couldn't get VEB bw info, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, 
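The early return added to `i40e_vsi_setup_vectors()` encodes the interrupt model: with MSI-X each VSI reserves a block of vectors out of the PF's `irq_pile`; in MSI/legacy mode there is only one vector, shared with the misc/ICR0 handler, so there is nothing to reserve. The branch, isolated and slightly condensed:

```c
/* Legacy/MSI: queue interrupts piggyback on misc/ICR0, skip the pile. */
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
	return 0;

vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
				 vsi->num_q_vectors, vsi->idx);
```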
pf->hw.aq.asq_last_status)); + i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } @@ -9318,8 +9851,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "get switch config failed %d aq_err=%x\n", - ret, pf->hw.aq.asq_last_status); + "get switch config failed err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } @@ -9360,8 +9895,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, - "couldn't fetch switch config, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't fetch switch config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); @@ -9390,6 +9926,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) } else { /* force a reset of TC and queue layout configurations */ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); @@ -9413,7 +9950,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -9531,8 +10068,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) } pf->queues_left = queues_left; + dev_dbg(&pf->pdev->dev, + "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", + pf->hw.func_caps.num_tx_qp, + !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), + pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps, + pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); #ifdef I40E_FCOE - dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); + dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif } @@ -9600,12 +10143,19 @@ static void i40e_print_features(struct i40e_pf *pf) } if (pf->flags & I40E_FLAG_DCB_CAPABLE) buf += sprintf(buf, "DCB "); +#if IS_ENABLED(CONFIG_VXLAN) + buf += sprintf(buf, "VxLAN "); +#endif if (pf->flags & I40E_FLAG_PTP) buf += sprintf(buf, "PTP "); #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) buf += sprintf(buf, "FCOE "); #endif + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + buf += sprintf(buf, "VEB "); + else + buf += sprintf(buf, "VEPA "); BUG_ON(buf > (string + INFO_STRING_LEN)); dev_info(&pf->pdev->dev, "%s\n", string); @@ -9626,14 +10176,15 @@ static void i40e_print_features(struct i40e_pf *pf) static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct i40e_aq_get_phy_abilities_resp abilities; - unsigned long ioremap_len; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; + u16 wol_nvm_bits; u16 link_status; - int err = 0; + int err; u32 len; u32 i; + u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) @@ -9679,15 +10230,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &pf->hw; hw->back = pf; - ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), - I40E_MAX_CSR_SPACE); + pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); - hw->hw_addr = 
ioremap(pci_resource_start(pdev, 0), ioremap_len); + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", (unsigned int)pci_resource_start(pdev, 0), - (unsigned int)pci_resource_len(pdev, 0), err); + pf->ioremap_len, err); goto err_ioremap; } hw->vendor_id = pdev->vendor; @@ -9736,15 +10287,28 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_shared_code(hw); if (err) { - dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); goto err_pf_reset; } /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + err = i40e_init_adminq(hw); - dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); + + /* provide nvm, fw, api versions */ + dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + i40e_nvm_version_str(hw)); + if (err) { dev_info(&pdev->dev, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); @@ -9844,10 +10408,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, &pf->state); pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; - pf->link_check_timeout = jiffies; - /* WoL defaults to disabled */ - pf->wol_en = false; + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) + pf->wol_en = false; + else + pf->wol_en = true; device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); /* set up the main switch operations */ @@ -9888,6 +10455,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + + /* Make sure flow control is set according to current settings */ + err = i40e_set_fc(hw, &set_fc_aq_fail, true); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_phy_cap\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on set_phy_config\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_link_info\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { @@ -9903,15 +10489,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (err) - dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); if (((pf->hw.aq.fw_maj_ver 
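Two probe-path changes above are worth calling out. First, the AdminQ mutexes are now created once in probe (`mutex_init(&hw->aq.asq_mutex)` and the ARQ twin), paired with the `mutex_destroy()` calls added to remove() further down, so the locks live exactly as long as the device. Second, the WoL default now comes from the NVM instead of being hard-coded off: the `I40E_SR_NVM_WAKE_ON_LAN` word carries one *disable* bit per port, and partitions other than 1 never enable WoL. The decode, condensed (note the inverted sense of the bit):

```c
u16 wol_nvm_bits;

i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
pf->wol_en = !((wol_nvm_bits & BIT(hw->port)) ||
	       hw->partition_id != 1);
device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
```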
== 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector @@ -9974,35 +10564,82 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_fcoe_vsi_setup(pf); #endif - /* Get the negotiated link width and speed from PCI config space */ - pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); +#define PCI_SPEED_SIZE 8 +#define PCI_WIDTH_SIZE 8 + /* Devices on the IOSF bus do not have this information + * and will report PCI Gen 1 x 1 by default so don't bother + * checking them. + */ + if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { + char speed[PCI_SPEED_SIZE] = "Unknown"; + char width[PCI_WIDTH_SIZE] = "Unknown"; - i40e_set_pci_config_data(hw, link_status); + /* Get the negotiated link width and speed from PCI config + * space + */ + pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, + &link_status); + + i40e_set_pci_config_data(hw, link_status); + + switch (hw->bus.speed) { + case i40e_bus_speed_8000: + strncpy(speed, "8.0", PCI_SPEED_SIZE); break; + case i40e_bus_speed_5000: + strncpy(speed, "5.0", PCI_SPEED_SIZE); break; + case i40e_bus_speed_2500: + strncpy(speed, "2.5", PCI_SPEED_SIZE); break; + default: + break; + } + switch (hw->bus.width) { + case i40e_bus_width_pcie_x8: + strncpy(width, "8", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x4: + strncpy(width, "4", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x2: + strncpy(width, "2", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x1: + strncpy(width, "1", PCI_WIDTH_SIZE); break; + default: + break; + } - dev_info(&pdev->dev, "PCI-Express: %s %s\n", - (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : - hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : - hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : - "Unknown"), - (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : - hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : - hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : - hw->bus.width == i40e_bus_width_pcie_x1 ? 
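The PCIe bandwidth report above is now skipped for parts behind the internal IOSF bus (they always claim Gen1 x1, so the warning would be noise) and otherwise reads the negotiated link out of config space. The raw readout is a single call; the `switch` statements above decode the speed and width into printable strings:

```c
u16 link_status;

/* PCI_EXP_LNKSTA holds the negotiated speed (CLS field) and
 * width (NLW field) of the link as trained. */
pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
i40e_set_pci_config_data(hw, link_status);
```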
"Width x1" : - "Unknown")); + dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", + speed, width); - if (hw->bus.width < i40e_bus_width_pcie_x8 || - hw->bus.speed < i40e_bus_speed_8000) { - dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); - dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + if (hw->bus.width < i40e_bus_width_pcie_x8 || + hw->bus.speed < i40e_bus_speed_8000) { + dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); + dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + } } /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) - dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", - err); + dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* get the supported phy types from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); + if (err) + dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); + + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. 
+ */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* print a string summarizing features */ i40e_print_features(pf); @@ -10050,6 +10687,7 @@ err_dma: static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; i40e_status ret_code; int i; @@ -10057,11 +10695,14 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); + /* Disable RSS in hw */ + wr32(hw, I40E_PFQF_HENA(0), 0); + wr32(hw, I40E_PFQF_HENA(1), 0); + /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); - i40e_fdir_teardown(pf); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); @@ -10104,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev) "Failed to destroy the Admin Queue resources: %d\n", ret_code); + /* destroy the locks only once, here */ + mutex_destroy(&hw->aq.arq_mutex); + mutex_destroy(&hw->aq.asq_mutex); + /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { @@ -10173,7 +10818,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) int err; u32 reg; - dev_info(&pdev->dev, "%s\n", __func__); + dev_dbg(&pdev->dev, "%s\n", __func__); if (pci_enable_device_mem(pdev)) { dev_info(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); @@ -10213,13 +10858,13 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); - dev_info(&pdev->dev, "%s\n", __func__); + dev_dbg(&pdev->dev, "%s\n", __func__); if (test_bit(__I40E_SUSPENDED, &pf->state)) return; rtnl_lock(); i40e_handle_reset_warning(pf); - rtnl_lock(); + rtnl_unlock(); } /** @@ -10240,6 +10885,19 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + + wr32(hw, I40E_PFPM_APM, + (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, + (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_clear_interrupt_scheme(pf); if (system_state == SYSTEM_POWER_OFF) { @@ -10260,9 +10918,6 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); - del_timer_sync(&pf->service_timer); - cancel_work_sync(&pf->service_task); - i40e_fdir_teardown(pf); rtnl_lock(); i40e_prep_for_reset(pf); @@ -10295,9 +10950,7 @@ static int i40e_resume(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, - "%s: Cannot enable PCI device from suspend\n", - __func__); + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 554e49d02..6100cdd9a 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); /* Switching to words (sr_size contains power of 2KB) */ - nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; + nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); @@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { /* Write the address and start reading */ - sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | - (1 << I40E_GLNVM_SRCTL_START_SHIFT); + sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + BIT(I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); /* Poll I40E_GLNVM_SRCTL until the done bit is set */ @@ -212,6 +212,74 @@ read_nvm_exit: } /** + * i40e_read_nvm_aq - Read Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to read + * @data: buffer to store the words read from the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Reads a 16 bit words buffer from the Shadow RAM using the admin command. + **/ +static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + i40e_status ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + memset(&cmd_details, 0, sizeof(cmd_details)); + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model.
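
A worked example of the Shadow RAM sizing in the i40e_init_nvm() hunk above: the GLNVM_GENS field is a power-of-two exponent in units of 1KB, so BIT(sr_size) * I40E_SR_WORDS_IN_1KB yields the size in 16-bit words. A standalone sketch, assuming I40E_SR_WORDS_IN_1KB is 512 as in the driver headers:

#include <stdio.h>

#define BIT(n) (1UL << (n))          /* as in <linux/bitops.h> */
#define SR_WORDS_IN_1KB 512UL        /* assumed: 1KB of 16-bit words */

int main(void)
{
	unsigned long sr_size_field = 4; /* example value read from GLNVM_GENS */
	unsigned long sr_words = BIT(sr_size_field) * SR_WORDS_IN_1KB;

	/* prints: shadow RAM: 8192 words (16 KB) */
	printf("shadow RAM: %lu words (%lu KB)\n", sr_words, sr_words * 2 / 1024);
	return 0;
}
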
+ */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can read only up to 4KB (one sector) in one AQ read */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read fail error: tried to read %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single read cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, &cmd_details); + + return ret_code; +} + +/** + * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the AdminQ. + **/ +static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + i40e_status ret_code = I40E_ERR_TIMEOUT; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); + *data = le16_to_cpu(*(__le16 *)data); + + return ret_code; +} + +/** * i40e_read_nvm_word - Reads Shadow RAM * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) @@ -222,7 +290,18 @@ read_nvm_exit: i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - return i40e_read_nvm_word_srctl(hw, offset, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + } + return ret_code; } /** @@ -257,6 +336,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code; + u16 read_size = *words; + bool last_cmd = false; + u16 words_read = 0; + u16 i = 0; + + do { + /* Calculate the number of words to read in this step. + * The FVL AQ does not allow reading more than one page at a + * time or crossing page boundaries.
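
The read loop that follows splits a buffer read so that no single AQ command crosses a 4KB sector. A standalone sketch of the same arithmetic, assuming I40E_SR_SECTOR_SIZE_IN_WORDS is 0x800 (one 4KB sector expressed in 16-bit words) as in the driver headers:

#include <stdio.h>

#define SECTOR_WORDS 0x800u /* assumed: one 4KB sector in 16-bit words */

/* Words we may read at 'offset' without crossing a sector boundary. */
static unsigned int chunk_words(unsigned int offset, unsigned int remaining)
{
	unsigned int room = SECTOR_WORDS - (offset % SECTOR_WORDS);

	return remaining < room ? remaining : room;
}

int main(void)
{
	unsigned int offset = 0x7fe, words = 10;

	while (words) {
		unsigned int n = chunk_words(offset, words);

		/* prints: 2 words at 0x7fe, then 8 words at 0x800 */
		printf("read %u words at 0x%x\n", n, offset);
		offset += n;
		words -= n;
	}
	return 0;
}
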
+ */ + if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) + read_size = min(*words, + (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - + (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); + else + read_size = min((*words - words_read), + I40E_SR_SECTOR_SIZE_IN_WORDS); + + /* Check if this is last command, if so set proper flag */ + if ((words_read + read_size) >= *words) + last_cmd = true; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, + data + words_read, last_cmd); + if (ret_code) + goto read_nvm_buffer_aq_exit; + + /* Increment counter for words already read and move offset to + * new read location + */ + words_read += read_size; + offset += read_size; + } while (words_read < *words); + + for (i = 0; i < *words; i++) + data[i] = le16_to_cpu(((__le16 *)data)[i]); + +read_nvm_buffer_aq_exit: + *words = words_read; + return ret_code; +} + +/** * i40e_read_nvm_buffer - Reads Shadow RAM buffer * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). @@ -270,7 +406,19 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - return i40e_read_nvm_buffer_srctl(hw, offset, words, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + return ret_code; } /** @@ -289,6 +437,10 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, bool last_command) { i40e_status ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. 
* We cannot do it for the module-based model, as we did not acquire @@ -314,7 +466,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ - data, last_command, NULL); + data, last_command, &cmd_details); return ret_code; } @@ -332,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { - i40e_status ret_code = 0; + i40e_status ret_code; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; @@ -412,13 +564,16 @@ i40e_calc_nvm_checksum_exit: **/ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code; u16 checksum; + __le16 le_sum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); - if (!ret_code) + if (!ret_code) { + le_sum = cpu_to_le16(checksum); ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, - 1, &checksum, true); + 1, &le_sum, true); + } return ret_code; } @@ -463,25 +618,31 @@ i40e_validate_nvm_checksum_exit: static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *errno); static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno); + int *perrno); static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno); + int *perrno); static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); + u8 *bytes, int *perrno); +static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static inline u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); @@ -491,7 +652,7 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } -static char *i40e_nvm_update_state_str[] = { +static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", "I40E_NVMUPD_READ_SNT", @@ -505,6 +666,9 @@ static char *i40e_nvm_update_state_str[] = { "I40E_NVMUPD_CSUM_CON", "I40E_NVMUPD_CSUM_SA", "I40E_NVMUPD_CSUM_LCB", + "I40E_NVMUPD_STATUS", + "I40E_NVMUPD_EXEC_AQ", + "I40E_NVMUPD_GET_AQ_RESULT", }; /** @@ -512,30 +676,60 @@ static char *i40e_nvm_update_state_str[] = { * @hw: pointer to hardware structure * @cmd: pointer to nvm update command * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * Dispatches command depending on what update state is current **/ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { i40e_status status; + enum i40e_nvmupd_cmd upd_cmd; /* assume success */ - *errno = 0; + *perrno = 0; + + /* 
early check for status command and debug msgs */ + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); + + if (upd_cmd == I40E_NVMUPD_INVALID) { + *perrno = -EFAULT; + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *perrno); + } + + /* a status request returns immediately rather than + * going into the state machine + */ + if (upd_cmd == I40E_NVMUPD_STATUS) { + bytes[0] = hw->nvmupd_state; + return 0; + } switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: - status = i40e_nvmupd_state_init(hw, cmd, bytes, errno); + status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_READING: - status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno); + status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_WRITING: - status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno); + status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_INIT_WAIT: + case I40E_NVMUPD_STATE_WRITE_WAIT: + status = I40E_ERR_NOT_READY; + *perrno = -EBUSY; break; default: @@ -543,7 +737,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: no such state %d\n", hw->nvmupd_state); status = I40E_NOT_SUPPORTED; - *errno = -ESRCH; + *perrno = -ESRCH; break; } return status; } /** @@ -554,28 +748,28 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * Process legitimate commands of the Init state and conditionally set next * state. Reject all other commands.
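
The dispatch above adds two wait states: once a write or erase has been handed to firmware, further commands are bounced with -EBUSY until the AdminQ completion releases the NVM and advances the state. A minimal standalone model of that behavior (state and function names here are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>

enum upd_state { ST_INIT, ST_WRITING, ST_WRITE_WAIT };

/* Issue a write from INIT; reject anything while waiting on firmware. */
static int dispatch(enum upd_state *st)
{
	switch (*st) {
	case ST_INIT:
		*st = ST_WRITE_WAIT;	/* command sent, wait for AQ completion */
		return 0;
	case ST_WRITE_WAIT:
		return -EBUSY;		/* caller must retry later */
	case ST_WRITING:
		return 0;
	}
	return -ESRCH;			/* unknown state */
}

int main(void)
{
	enum upd_state st = ST_INIT;
	int first = dispatch(&st);
	int second = dispatch(&st);	/* still waiting: -EBUSY */

	printf("%d %d\n", first, second);
	return 0;
}
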
**/ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); } break; @@ -583,10 +777,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); if (status) i40e_release_nvm(hw); else @@ -597,70 +791,83 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_erase(hw, cmd, errno); - if (status) + status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); + if (status) { i40e_release_nvm(hw); - else + } else { hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } } break; case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (status) + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { i40e_release_nvm(hw); - else + } else { hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } } break; case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) i40e_release_nvm(hw); else - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(status, + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { - *errno = hw->aq.asq_last_status ? + *perrno = hw->aq.asq_last_status ? 
i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; + case I40E_NVMUPD_EXEC_AQ: + status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_GET_AQ_RESULT: + status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); + break; + default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_ERR_NVM; - *errno = -ESRCH; + *perrno = -ESRCH; break; } return status; @@ -671,28 +878,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands. **/ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { - i40e_status status; + i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: case I40E_NVMUPD_READ_CON: - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_READ_LCB: - status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; @@ -702,7 +909,7 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, "NVMUPD: bad cmd %s in reading state.\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; - *errno = -ESRCH; + *perrno = -ESRCH; break; } return status; @@ -713,55 +920,68 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands **/ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { - i40e_status status; + i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; bool retry_attempt = false; - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (!status) + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; break; case I40E_NVMUPD_WRITE_LCB: - status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (!status) + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + *perrno = hw->aq.asq_last_status ? 
+ i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } break; case I40E_NVMUPD_CSUM_CON: status = i40e_update_nvm_checksum(hw); if (status) { - *errno = hw->aq.asq_last_status ? + *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_CSUM_LCB: status = i40e_update_nvm_checksum(hw); - if (status) - *errno = hw->aq.asq_last_status ? + if (status) { + *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; - else + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } break; default: @@ -769,7 +989,7 @@ retry: "NVMUPD: bad cmd %s in writing state.\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; - *errno = -ESRCH; + *perrno = -ESRCH; break; } @@ -812,21 +1032,22 @@ retry: * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * Return one of the valid command types or I40E_NVMUPD_INVALID **/ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno) + int *perrno) { enum i40e_nvmupd_cmd upd_cmd; - u8 transaction; + u8 module, transaction; /* anything that doesn't match a recognized case is an error */ upd_cmd = I40E_NVMUPD_INVALID; transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); /* limits on data size */ if ((cmd->data_size < 1) || @@ -834,7 +1055,7 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_validate_command data_size %d\n", cmd->data_size); - *errno = -EFAULT; + *perrno = -EFAULT; return I40E_NVMUPD_INVALID; } @@ -853,6 +1074,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, case I40E_NVM_SA: upd_cmd = I40E_NVMUPD_READ_SA; break; + case I40E_NVM_EXEC: + if (module == 0xf) + upd_cmd = I40E_NVMUPD_STATUS; + else if (module == 0) + upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; } break; @@ -882,21 +1109,155 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, case (I40E_NVM_CSUM|I40E_NVM_LCB): upd_cmd = I40E_NVMUPD_CSUM_LCB; break; + case I40E_NVM_EXEC: + if (module == 0) + upd_cmd = I40E_NVMUPD_EXEC_AQ; + break; } break; } - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", - i40e_nvm_update_state_str[upd_cmd], - hw->nvmupd_state, - hw->aq.nvm_release_on_done); - if (upd_cmd == I40E_NVMUPD_INVALID) { - *errno = -EFAULT; + return upd_cmd; +} + +/** + * i40e_nvmupd_exec_aq - Run an AQ command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + i40e_status status; 
+ struct i40e_aq_desc *aq_desc; + u32 buff_size = 0; + u8 *buff = NULL; + u32 aq_desc_len; + u32 aq_data_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + aq_desc_len = sizeof(struct i40e_aq_desc); + memset(&hw->nvm_wb_desc, 0, aq_desc_len); + + /* get the aq descriptor */ + if (cmd->data_size < aq_desc_len) { i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_validate_command returns %d errno %d\n", - upd_cmd, *errno); + "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", + cmd->data_size, aq_desc_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; } - return upd_cmd; + aq_desc = (struct i40e_aq_desc *)bytes; + + /* if data buffer needed, make sure it's ready */ + aq_data_len = cmd->data_size - aq_desc_len; + buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); + if (buff_size) { + if (!hw->nvm_buff.va) { + status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, + hw->aq.asq_buf_size); + if (status) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", + status); + } + + if (hw->nvm_buff.va) { + buff = hw->nvm_buff.va; + memcpy(buff, &bytes[aq_desc_len], aq_data_len); + } + } + + /* and away we go! */ + status = i40e_asq_send_command(hw, aq_desc, buff, + buff_size, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_exec_aq err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + int remainder; + u8 *buff; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + + /* check offset range */ + if (cmd->offset > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", + __func__, cmd->offset, aq_total_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; + } + + /* check copylength range */ + if (cmd->data_size > (aq_total_len - cmd->offset)) { + int new_len = aq_total_len - cmd->offset; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, new_len); + cmd->data_size = new_len; + } + + remainder = cmd->data_size; + if (cmd->offset < aq_desc_len) { + u32 len = aq_desc_len - cmd->offset; + + len = min(len, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", + __func__, cmd->offset, cmd->offset + len); + + buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; + memcpy(bytes, buff, len); + + bytes += len; + remainder -= len; + buff = hw->nvm_buff.va; + } else { + buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + } + + if (remainder > 0) { + int start_byte = buff - (u8 *)hw->nvm_buff.va; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", + __func__, start_byte, start_byte + remainder); + memcpy(bytes, buff, remainder); + } + + return 0; } /** @@ -904,14 +1265,15 @@ 
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { + struct i40e_asq_cmd_details cmd_details; i40e_status status; u8 module, transaction; bool last; @@ -920,8 +1282,11 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - bytes, last, NULL); + bytes, last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", @@ -929,7 +1294,7 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read status %d aq %d\n", status, hw->aq.asq_last_status); - *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; @@ -939,23 +1304,28 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, * i40e_nvmupd_nvm_erase - Erase an NVM module * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - int *errno) + int *perrno) { i40e_status status = 0; + struct i40e_asq_cmd_details cmd_details; u8 module, transaction; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - last, NULL); + last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", @@ -963,7 +1333,7 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase status %d aq %d\n", status, hw->aq.asq_last_status); - *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; @@ -974,15 +1344,16 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer - * @errno: pointer to return error code + * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, - u8 *bytes, int *errno) + u8 *bytes, int *perrno) { i40e_status status = 0; + struct i40e_asq_cmd_details cmd_details; u8 module, transaction; bool last; @@ -990,8 +1361,12 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); + 
memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + status = i40e_aq_update_nvm(hw, module, cmd->offset, - (u16)cmd->data_size, bytes, last, NULL); + (u16)cmd->data_size, bytes, last, + &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", @@ -999,7 +1374,7 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write status %d aq %d\n", status, hw->aq.asq_last_status); - *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h index ad802dd0f..5b6feb7ed 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -35,7 +35,7 @@ #include <linux/highuid.h> /* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include <asm-generic/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-lo-hi.h> /* File to be the magic between shared code and * actual OS primitives diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 7b34f1e66..bb9d583e5 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -58,6 +58,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); + +i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); @@ -245,7 +258,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw); i40e_status i40e_pf_reset(struct i40e_hw *hw); void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); -bool i40e_get_link_status(struct i40e_hw *hw); +i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +i40e_status i40e_update_link_info(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, @@ -308,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c index a92b7725d..565ca7c83 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ 
b/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -43,9 +43,8 @@ #define I40E_PTP_10GB_INCVAL 0x0333333333ULL #define I40E_PTP_1GB_INCVAL 0x2000000000ULL -#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ - I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) -#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) /** @@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); - if (!(prttsyn_stat & (1 << index))) + if (!(prttsyn_stat & BIT(index))) return; lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); @@ -619,9 +618,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) /* Attempt to register the clock before enabling the hardware. */ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); - if (IS_ERR(pf->ptp_clock)) { + if (IS_ERR(pf->ptp_clock)) return PTR_ERR(pf->ptp_clock); - } /* clear the hwtstamp settings here during clock create, instead of * during regular init, so that we can maintain settings across a @@ -676,8 +674,8 @@ void i40e_ptp_init(struct i40e_pf *pf) struct timespec64 ts; u32 regval; - dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, - netdev->name); + if (pf->hw.debug_mask & I40E_DEBUG_LAN) + dev_info(&pf->pdev->dev, "PHC enabled\n"); pf->flags |= I40E_FLAG_PTP; /* Ensure the clocks are running. */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h index 522d6df51..dc0402fe3 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -873,6 +873,13 @@ #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 #define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) @@ -3366,4 +3373,1933 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#endif + +#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ +#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 +#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) +#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ +#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 +#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 +#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) +#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 +#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK 
I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 +#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) +#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ +#define I40E_MNGSB_FDS_START_BC_SHIFT 0 +#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) +#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 +#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) + +#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) +#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) + +#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) +#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) + +#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 +#define 
I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) +#define I40E_GL_FWSTS_FWROWD_SHIFT 8 +#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) +#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_CEQPART_MAX_INDEX 15 +#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) +#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) +#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) +#define I40E_GLHMC_PEARPOBJSZ 
0x000C2034 /* Reset: CORER */ +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) +#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) +#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) +#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) +#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) 
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) +#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) +#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) +#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) +#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) 
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) +#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) +#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) +#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) +#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) +#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) +#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, 
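
Every register in this block follows the same SHIFT/MASK pattern, with the mask pre-shifted by I40E_MASK(). A short sketch of how a field is extracted, assuming I40E_MASK(mask, shift) is the usual pre-shifted-mask helper, effectively (mask << shift), as in the driver's type header, and using the driver's rd32() register accessor:

/* Illustrative (driver context assumed): read the PMCEQSIZE field out of
 * GLHMC_CEQPART(i) by masking, then shifting back down. */
static u32 ceq_part_size(struct i40e_hw *hw, int i)
{
	u32 reg = rd32(hw, I40E_GLHMC_CEQPART(i));

	return (reg & I40E_GLHMC_CEQPART_PMCEQSIZE_MASK) >>
	       I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT;
}
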
I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 +#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 +#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) +#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER 
*/ +#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_VFPESRQBASE(_i) 
(0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 +#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPEOC_CACHESIZE 
0x000A60E8 /* Reset: CORER */ +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) +#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) +#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) +#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 +#define 
I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) +#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) +#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ +#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 +#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) +#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QBASE_MAX_INDEX 127 +#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 +#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) +#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 +#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) +#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) +#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ +#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 +#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) +#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 +#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) +#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 +#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) +#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 +#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, 
I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) +#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) + +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 +#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) +#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 +#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 +#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) +#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 +#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) +#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ +#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 +#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 +#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) +#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 +#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 +#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 +#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 +#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) +#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 +#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 +#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) +#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 +#define 
I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11 +#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12 +#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13 +#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14 +#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15 +#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16 +#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT) +#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */ +#define I40E_MNGSB_DADD_ADDR_SHIFT 0 +#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT) +#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */ +#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0 +#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT) +#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */ +#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0 +#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT) +#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8 +#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT) +#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26 
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT) +#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28 +#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT) +#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30 +#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT) +#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31 +#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT) +#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */ +#define I40E_MNGSB_RDATA_DATA_SHIFT 0 +#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT) +#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */ +#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0 +#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT) +#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8 +#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT) +#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16 +#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT) +#define I40E_MNGSB_RHDR0_TAG_SHIFT 24 +#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT) +#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27 +#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT) +#define I40E_MNGSB_RHDR0_EH_SHIFT 31 +#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT) +#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */ +#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0 +#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26 +#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30 +#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31 +#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT) +#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */ +#define I40E_MNGSB_WDATA_DATA_SHIFT 0 +#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT) +#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */ +#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0 +#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT) +#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12 +#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT) +#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16 +#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT) +#define I40E_MNGSB_WHDR0_TAG_SHIFT 24 +#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT) +#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */ +#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0 +#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT) +#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */ +#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0 +#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT) + +#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21 +#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT) + +#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, 
I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT) +#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK 
I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT) + +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */ +#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0 +#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT) +#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1 +#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT) +#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2 +#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT) +#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6 +#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT) +#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16 +#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, 
I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT) +#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT) +#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) +#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) +#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 +#define 
I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) +#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) +#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) +#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) +#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) +#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) +#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) +#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ +#define 
I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) +#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) +#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 +#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ +#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ +#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ +#define I40E_PFPE_CQACK_PECQID_SHIFT 0 +#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT) +#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ +#define I40E_PFPE_CQARM_PECQID_SHIFT 0 +#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT) +#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ +#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ +#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ +#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, 
I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */ +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */ +#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 +#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) +#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31 +#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ +#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */ +#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */ +#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */ +#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */ +#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0 +#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8 +#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, 
I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16 +#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24 +#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT) +#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT) +#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13 +#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT) +#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30 +#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT) +#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT) +#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16 +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31 +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT) +#define 
I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT) +#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0 +#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT) +#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8 +#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT) +#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16 +#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT) +#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 +#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQACK_MAX_INDEX 127 +#define I40E_VFPE_CQACK_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT) +#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQARM_MAX_INDEX 127 +#define I40E_VFPE_CQARM_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT) +#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPDB_MAX_INDEX 127 +#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define 
I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 +#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 +#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 +#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* 
_i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 
+ ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define 
I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, 
I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define 
I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define 
I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, 
I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) +#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) +#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) +#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) +#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) +#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) +#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ 
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* 
Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 
+ ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define 
I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) 
(0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) +#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ +#define I40E_GLQF_APBVT_MAX_INDEX 2047 +#define I40E_GLQF_APBVT_APBVT_SHIFT 0 +#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) +#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ +#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) +#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) 
/* _i=0...1 */ /* Reset: CORER */ +#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) +#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) +#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ +#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) +/* Redefined for X722 family */ +#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 +#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PFQF_HREGION_MAX_INDEX 7 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK 
I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) +#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HKEY_MAX_INDEX 12 +#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) +#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) +#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) +#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) +#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HLUT_MAX_INDEX 15 +#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 +#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) +#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 +#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) +#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 +#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) +#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 +#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) +#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) +#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK 
I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* _I40E_REGISTER_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9d95042d5..635b3ac17 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 
0); - /* set the timestamp */ - tx_buf->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. */ @@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, if (add) { pf->fd_tcp_rule++; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; } } else { @@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, (pf->fd_tcp_rule - 1) : 0; if (pf->fd_tcp_rule == 0) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); } } @@ -465,11 +464,12 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; - if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id); if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", - rx_desc->wb.qword0.hi_dword.fd_id); + pf->fd_inv); /* Check if the programming error is for ATR. * If so, auto disable ATR and set a state for @@ -501,7 +501,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { - dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; } @@ -509,8 +510,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, dev_info(&pdev->dev, "FD filter programming failed due to incorrect filter parameters\n"); } - } else if (error == - (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", rx_desc->wb.qword0.hi_dword.fd_id); @@ -602,27 +602,13 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) } /** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - -/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -static u32 i40e_get_tx_pending(struct i40e_ring *ring) +u32 i40e_get_tx_pending(struct i40e_ring *ring) { u32 head, tail; @@ -636,50 +622,6 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) return 0; } -/** - * i40e_check_tx_hang - Is there a hang in the Tx queue - * @tx_ring: the ring of 
descriptors - **/ -static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) -{ - u32 tx_done = tx_ring->stats.packets; - u32 tx_done_old = tx_ring->tx_stats.tx_done_old; - u32 tx_pending = i40e_get_tx_pending(tx_ring); - struct i40e_pf *pf = tx_ring->vsi->back; - bool ret = false; - - clear_check_for_tx_hang(tx_ring); - - /* Check for a hung queue, but be thorough. This verifies - * that a transmit has been completed since the previous - * check AND there is at least one packet pending. The - * ARMED bit is set to indicate a potential hang. The - * bit is cleared if a pause frame is received to remove - * false hang detection due to PFC or 802.3x frames. By - * requiring this to fail twice we avoid races with - * PFC clearing the ARMED bit and conditions where we - * run the check_tx_hang logic with a transmit completion - * pending but without time to complete it yet. - */ - if ((tx_done_old == tx_done) && tx_pending) { - /* make sure it is true for two checks in a row */ - ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, - &tx_ring->state); - } else if (tx_done_old == tx_done && - (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { - if (I40E_DEBUG_FLOW & pf->hw.debug_mask) - dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", - tx_pending, tx_ring->queue_index); - pf->tx_sluggish_count++; - } else { - /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_done; - clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } - - return ret; -} - #define WB_STRIDE 0x3 /** @@ -785,46 +727,21 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; - /* check to see if there are any non-cache aligned descriptors - * waiting to be written back, and kick the hardware to force - * them to be written back in case of napi polling - */ - if (budget && - !((i & WB_STRIDE) == WB_STRIDE) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - else - tx_ring->arm_wb = false; - - if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { - /* schedule immediate reset if we believe we hung */ - dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" - " VSI <%d>\n" - " Tx Queue <%d>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n", - tx_ring->vsi->seid, - tx_ring->queue_index, - tx_ring->next_to_use, i); - dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_bi[i].time_stamp, jiffies); - - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - dev_info(tx_ring->dev, - "tx hang detected on queue %d, reset requested\n", - tx_ring->queue_index); - - /* do not fire the reset immediately, wait for the stack to - * decide we are truly stuck, also prevents every queue from - * simultaneously requesting a reset + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + unsigned int j = 0; + + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. 
*/ + j = i40e_get_tx_pending(tx_ring); - /* the adapter is about to reset, no point in enabling polling */ - budget = 1; + if (budget && + ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; } netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, @@ -856,23 +773,50 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) * @q_vector: the vector on which to force writeback * **/ -static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { - u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ - - wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), - val); + u16 flags = q_vector->tx.ring[0].flags; + + if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { + u32 val; + + if (q_vector->arm_wb_state) + return; + + val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK; + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + + vsi->base_vector - 1), + val); + q_vector->arm_wb_state = true; + } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + + vsi->base_vector - 1), val); + } else { + u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */ + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); + } } /** * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -881,22 +825,33 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. 
*/ - bytes_per_int = rc->total_bytes / rc->itr; - switch (rc->itr) { + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) new_latency_range = I40E_LOW_LATENCY; @@ -908,58 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: + case I40E_ULTRA_LATENCY: + default: if (bytes_per_int <= 20) - rc->latency_range = I40E_LOW_LATENCY; + new_latency_range = I40E_LOW_LATENCY; break; } + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. + */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + + rc->latency_range = new_latency_range; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) { - /* do an exponential smoothing */ - new_itr = (10 * new_itr * rc->itr) / - ((9 * new_itr) + rc->itr); - rc->itr = new_itr & I40E_MAX_ITR; - } - rc->total_bytes = 0; rc->total_packets = 0; -} -/** - * i40e_update_dynamic_itr - Adjust ITR based on bytes per int - * @q_vector: the vector to adjust - **/ -static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) -{ - u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; - struct i40e_hw *hw = &q_vector->vsi->back->hw; - u32 reg_addr; - u16 old_itr; - - reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1); - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) - wr32(hw, reg_addr, q_vector->rx.itr); - - reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1); - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) - wr32(hw, reg_addr, q_vector->tx.itr); + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /** @@ -1005,6 +954,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!dev) return -ENOMEM; + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_bi); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) @@ -1165,6 +1116,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) struct device *dev = rx_ring->dev; int bi_size; + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_bi); bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) @@ -1345,16 +1298,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; - struct i40e_vsi *vsi = rx_ring->vsi; - u64 flags = vsi->back->flags; if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (flags & I40E_FLAG_IN_NETPOLL) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -1390,7 +1338,7 @@ static inline void 
i40e_rx_checksum(struct i40e_vsi *vsi, return; /* did the hardware decode the packet and checksum? */ - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ @@ -1405,25 +1353,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6 = true; if (ipv4 && - (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | - (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | + BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && - rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; /* If VXLAN traffic has an outer UDPv4 checksum we need to check @@ -1432,7 +1380,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * so the total length of IPv4 header is IHL*4 bytes * The UDP_0 bit *may* bet set if the *inner* header is UDP */ - if (ipv4_tunnel) { + if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && + (ipv4_tunnel)) { skb->transport_header = skb->mac_header + sizeof(struct ethhdr) + (ip_hdr(skb)->ihl * 4); @@ -1520,7 +1469,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_node_id(); + const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; @@ -1547,7 +1496,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1588,8 +1537,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1598,6 +1547,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) cleaned_count++; if (rx_hbo || rx_sph) { int len; + if (rx_hbo) len = I40E_RX_HDR_SIZE; else @@ -1641,7 +1591,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; @@ -1651,11 +1601,8 @@ static int 
i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } @@ -1676,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE @@ -1688,7 +1635,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_ring->netdev->last_rx = jiffies; rx_desc->wb.qword1.status_error_len = 0; } while (likely(total_rx_packets < budget)); @@ -1738,7 +1684,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1761,7 +1707,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1779,17 +1725,14 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { rx_ring->rx_stats.non_eop_descs++; continue; } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } @@ -1810,7 +1753,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE @@ -1821,7 +1764,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) #endif i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_ring->netdev->last_rx = jiffies; rx_desc->wb.qword1.status_error_len = 0; } while (likely(total_rx_packets < budget)); @@ -1835,6 +1777,96 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_PFINT_DYN_CTLN + +/** + * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) +{ + struct i40e_hw *hw = &vsi->back->hw; + bool rx = false, tx = false; + u32 rxval, txval; + int vector; + + vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); + } + + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); + + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; + } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + +} + /** * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it @@ -1853,7 +1885,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; - int cleaned; + int work_done = 0; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1866,58 +1898,62 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb |= ring->arm_wb; + ring->arm_wb = false; } + /* Handle case where we are called by netpoll with a budget of 0 */ + 
if (budget <= 0) + goto tx_only; + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { + int cleaned; + if (ring_is_ps_enabled(ring)) cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); else cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + + work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ clean_complete &= (budget_per_ring != cleaned); } /* If work not completed, return budget and polling will return */ if (!clean_complete) { +tx_only: if (arm_wb) i40e_force_wb(vsi, q_vector); return budget; } + if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + q_vector->arm_wb_state = false; + /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete(napi); - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || - ITR_IS_DYNAMIC(vsi->tx_itr_setting)) - i40e_update_dynamic_itr(q_vector); - - if (!test_bit(__I40E_DOWN, &vsi->state)) { - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { - i40e_irq_dynamic_enable(vsi, - q_vector->v_idx + vsi->base_vector); - } else { - struct i40e_hw *hw = &vsi->back->hw; - /* We re-enable the queue 0 cause, but - * don't worry about dynamic_enable - * because we left it on for the other - * possible interrupts during napi - */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); - qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_RQCTL(0), qval); - - qval = rd32(hw, I40E_QINT_TQCTL(0)); - qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - - i40e_irq_dynamic_enable_icr0(vsi->back); - } + napi_complete_done(napi, work_done); + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_update_enable_itr(vsi, q_vector); + } else { /* Legacy mode */ + struct i40e_hw *hw = &vsi->back->hw; + /* We re-enable the queue 0 cause, but + * don't worry about dynamic_enable + * because we left it on for the other + * possible interrupts during napi + */ + u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) | + I40E_QINT_RQCTL_CAUSE_ENA_MASK; + + wr32(hw, I40E_QINT_RQCTL(0), qval); + qval = rd32(hw, I40E_QINT_TQCTL(0)) | + I40E_QINT_TQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_TQCTL(0), qval); + i40e_irq_dynamic_enable_icr0(vsi->back); } - return 0; } @@ -1925,11 +1961,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * i40e_atr - Add a Flow Director ATR filter * @tx_ring: ring to add programming descriptor to * @skb: send buffer - * @flags: send flags + * @tx_flags: send tx flags * @protocol: wire protocol **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 flags, __be16 protocol) + u32 tx_flags, __be16 protocol) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; @@ -1954,30 +1990,50 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) + return; - /* Currently only IPv4/IPv6 with TCP is supported */ - if (protocol == htons(ETH_P_IP)) { - if (hdr.ipv4->protocol != IPPROTO_TCP) - return; + if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) { + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - } 
else if (protocol == htons(ETH_P_IPV6)) { - if (hdr.ipv6->nexthdr != IPPROTO_TCP) + /* Currently only IPv4/IPv6 with TCP is supported + * access ihl as u8 to avoid unaligned access on ia64 + */ + if (tx_flags & I40E_TX_FLAGS_IPV4) + hlen = (hdr.network[0] & 0x0F) << 2; + else if (protocol == htons(ETH_P_IPV6)) + hlen = sizeof(struct ipv6hdr); + else return; - - hlen = sizeof(struct ipv6hdr); } else { - return; + hdr.network = skb_inner_network_header(skb); + hlen = skb_inner_network_header_len(skb); } + /* Currently only IPv4/IPv6 with TCP is supported + * Note: tx_flags gets modified to reflect inner protocols in + * tx_enable_csum function if encap is enabled. + */ + if ((tx_flags & I40E_TX_FLAGS_IPV4) && + (hdr.ipv4->protocol != IPPROTO_TCP)) + return; + else if ((tx_flags & I40E_TX_FLAGS_IPV6) && + (hdr.ipv6->nexthdr != IPPROTO_TCP)) + return; + th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) return; + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + /* HW ATR eviction will take care of removing filters on FIN + * and RST packets. + */ + if (th->fin || th->rst) + return; + } tx_ring->atr_count++; @@ -2022,9 +2078,19 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - dtype_cmd |= - ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & - I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) + dtype_cmd |= + ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + else + dtype_cmd |= + ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); fdir_desc->rsvd = cpu_to_le32(0); @@ -2045,13 +2111,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, * otherwise returns 0 to indicate the flags has been set properly. 
**/ #ifdef I40E_FCOE -int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) -#else -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, +inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) +#else +static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) #endif { __be16 protocol = skb->protocol; @@ -2077,6 +2143,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) return -EINVAL; @@ -2119,16 +2186,15 @@ out: * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol * @hdr_len: ptr to the size of the packet header + * @cd_type_cmd_tso_mss: ptr to u64 object * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss, + u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -2181,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information + * @cd_type_cmd_tso_mss: ptr to u64 object * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2220,12 +2287,13 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer - * @tx_flags: Tx flags currently set + * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) @@ -2235,12 +2303,20 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + struct udphdr *oudph; + struct iphdr *oiph; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: + oudph = udp_hdr(skb); + oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + break; + case IPPROTO_GRE: + l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; break; default: return; @@ -2250,18 +2326,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, this_ipv6_hdr = inner_ipv6_hdr(skb); this_tcp_hdrlen = inner_tcp_hdrlen(skb); - if (tx_flags & I40E_TX_FLAGS_IPV4) { - - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= 
I40E_TX_CTX_EXT_IP_IPV6; - if (tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; } @@ -2273,8 +2348,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; if (this_ip_hdr->version == 6) { - tx_flags &= ~I40E_TX_FLAGS_IPV4; - tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags &= ~I40E_TX_FLAGS_IPV4; + *tx_flags |= I40E_TX_FLAGS_IPV6; + } + if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && + (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && + (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { + oudph->check = ~csum_tcpudp_magic(oiph->saddr, + oiph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, 0); + *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; } } else { network_hdr_len = skb_network_header_len(skb); @@ -2284,12 +2368,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, } /* Enable IP checksum offloads */ - if (tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_hdr = this_ip_hdr->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; this_ip_hdr->check = 0; } else { @@ -2298,7 +2382,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the td_offset for IP header length */ *td_offset = (network_hdr_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { l4_hdr = this_ipv6_hdr->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; /* Now set the td_offset for IP header length */ @@ -2396,9 +2480,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * Returns 0 if stop is not needed **/ #ifdef I40E_FCOE -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) #else -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) #endif { if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) @@ -2473,13 +2557,13 @@ linearize_chk_done: * @td_offset: offset for checksum or crc **/ #ifdef I40E_FCOE -void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) -#else -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, +inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) +#else +static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) #endif { unsigned int data_len = skb->data_len; @@ -2491,6 +2575,9 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 td_tag = 0; dma_addr_t dma; u16 gso_segs; + u16 desc_count = 0; + bool tail_bump = true; + bool do_rs = false; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; @@ -2531,6 +2618,8 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -2550,6 +2639,8 @@ static void i40e_tx_map(struct i40e_ring 
*tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -2564,37 +2655,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - /* Place RS bit on last descriptor of any packet that spans across the - * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. - */ - if (((i & WB_STRIDE) != WB_STRIDE) && - (first <= &tx_ring->tx_bi[i]) && - (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << - I40E_TXD_QW1_CMD_SHIFT); - } else { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << - I40E_TXD_QW1_CMD_SHIFT); - } - - netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index), - first->bytecount); - - /* set the timestamp */ - first->time_stamp = jiffies; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -2604,12 +2664,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* Algorithm to optimize tail and RS bit setting: + * if xmit_more is supported + * if xmit_more is true + * do not update tail and do not mark RS bit. + * if xmit_more is false and last xmit_more was false + * if every packet spanned less than 4 desc + * then set RS bit on 4th packet and update tail + * on every packet + * else + * update tail and set RS bit on every packet. + * if xmit_more is false and last_xmit_more was true + * update tail and set RS bit. + * + * Optimization: wmb to be issued only in case of tail update. + * Also optimize the Descriptor WB path for RS bit with the same + * algorithm. + * + * Note: If there are less than 4 packets + * pending and interrupts were disabled the service task will + * trigger a force WB. + */ + if (skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) { + tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + tail_bump = false; + } else if (!skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)) && + (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && + (tx_ring->packet_stride < WB_STRIDE) && + (desc_count < WB_STRIDE)) { + tx_ring->packet_stride++; + } else { + tx_ring->packet_stride = 0; + tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + do_rs = true; + } + if (do_rs) + tx_ring->packet_stride = 0; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD : + I40E_TX_DESC_CMD_EOP) << + I40E_TXD_QW1_CMD_SHIFT); + /* notify HW of packet */ - if (!skb->xmit_more || - netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index))) + if (!tail_bump) + prefetchw(tx_desc + 1); + + if (tail_bump) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); writel(i, tx_ring->tail); + } return; @@ -2640,11 +2759,11 @@ dma_error: * one descriptor. **/ #ifdef I40E_FCOE -int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#else -static int i40e_xmit_descriptor_count(struct sk_buff *skb, +inline int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring) +#else +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) #endif { unsigned int f; @@ -2687,6 +2806,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u8 hdr_len = 0; int tsyn; int tso; + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -2706,7 +2826,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss, &cd_tunneling); if (tso < 0) @@ -2719,10 +2839,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags)) + if (i40e_chk_linearize(skb, tx_flags)) { if (skb_linearize(skb)) goto out_drop; - + tx_ring->tx_stats.tx_linearize++; + } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ @@ -2732,7 +2853,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); } diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 4b0b8102c..6779fb771 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -32,11 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -44,6 +47,15 @@ #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) +/* 0x40 is the enable bit for interrupt rate limiting, and must be set if + * the value of the rate limit is non-zero + */ +#define INTRL_ENA BIT(6) +#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) +#define INTRL_USEC_TO_REG(set) ((set) ? 
((set) >> 2) | INTRL_ENA : 0) +#define I40E_INTRL_8K 125 /* 8000 ints/sec */ +#define I40E_INTRL_62K 16 /* 62500 ints/sec */ +#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -66,17 +78,29 @@ enum i40e_dyn_idx_t { /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define i40e_pf_get_default_rss_hena(pf) \ + (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -129,16 +153,17 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM (u32)(1) -#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define I40E_TX_FLAGS_TSO (u32)(1 << 3) -#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) -#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) -#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) -#define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) -#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_CSUM BIT(0) +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_TSYN BIT(8) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -146,13 +171,13 @@ enum i40e_dyn_idx_t { struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; - unsigned long time_stamp; union { struct sk_buff *skb; void *raw_buf; }; unsigned int bytecount; unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; @@ -176,6 +201,7 @@ struct i40e_tx_queue_stats { u64 restart_queue; u64 tx_busy; u64 tx_done_old; + u64 tx_linearize; }; struct i40e_rx_queue_stats { @@ -187,8 +213,6 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_TX_DETECT_HANG, - __I40E_HANG_CHECK_ARMED, __I40E_RX_PS_ENABLED, __I40E_RX_16BYTE_DESC_ENABLED, }; @@ -199,12 +223,6 @@ enum i40e_ring_state_t { set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define clear_ring_ps_enabled(ring) \ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define check_for_tx_hang(ring) \ - test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) #define ring_is_16byte_desc_enabled(ring) \ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define set_ring_16byte_desc_enabled(ring) \ @@ -252,6 +270,12 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u8 packet_stride; + + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) +#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) /* stats structs */ struct i40e_queue_stats stats; @@ -274,6 +298,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { @@ -310,4 +335,20 @@ int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring); int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags); #endif +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); +u32 i40e_get_tx_pending(struct i40e_ring *ring); + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static 
inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} #endif /* _I40E_TXRX_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h index 568e855da..dd2da356d 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -33,24 +33,7 @@ #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) +#include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) @@ -120,6 +103,8 @@ enum i40e_mac_type { I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -151,14 +136,14 @@ enum i40e_set_fc_aq_failures { }; enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1, - I40E_VSI_VMDQ2, - I40E_VSI_CTRL, - I40E_VSI_FCOE, - I40E_VSI_MIRROR, - I40E_VSI_SRIOV, - I40E_VSI_FDIR, + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, I40E_VSI_TYPE_UNKNOWN }; @@ -182,16 +167,65 @@ struct i40e_link_status { bool crc_enable; u8 pacing; u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + 
I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = + BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; - u32 autoneg_advertised; - u32 phy_id; - u32 module_type; bool get_link_info; enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + enum i40e_aq_capabilities_phy_type phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 @@ -213,7 +247,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -270,6 +314,7 @@ struct i40e_nvm_info { bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ @@ -288,12 +333,17 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, }; /* nvm_access definition and its masks/shifts need to be accessible to @@ -312,6 +362,7 @@ enum i40e_nvmupd_state { #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 +#define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) @@ -392,6 +443,8 @@ struct i40e_fc_info { #define I40E_APP_PROTOID_FIP 0x8914 #define I40E_APP_SEL_ETHTYPE 0x1 #define I40E_APP_SEL_TCPIP 0x2 +#define I40E_CEE_APP_SEL_ETHTYPE 0x0 +#define I40E_CEE_APP_SEL_TCPIP 0x1 /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct i40e_dcb_ets_config { @@ -422,7 +475,10 @@ struct i40e_dcbx_config { u8 dcbx_mode; #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; struct i40e_dcb_ets_config etsrec; struct i40e_dcb_pfc_config pfc; @@ -474,6 +530,8 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_virt_mem nvm_buff; /* HMC info */ struct 
i40e_hmc_info hmc; /* HMC info struct */ @@ -482,16 +540,22 @@ struct i40e_hw { u16 dcbx_status; /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; - struct i40e_dcbx_config remote_dcbx_config; + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + +#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) + u64 flags; /* debug mask */ u32 debug_mask; + char err_str[16]; }; static inline bool i40e_is_vf(struct i40e_hw *hw) { - return hw->mac.type == I40E_MAC_VF; + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { @@ -588,19 +652,23 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ << I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT @@ -608,8 +676,8 @@ enum i40e_rx_desc_status_bits { I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ - I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -743,8 +811,7 @@ enum i40e_rx_ptype_payload_layer { I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ - I40E_RXD_QW1_LENGTH_SPH_SHIFT) +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ @@ -920,12 +987,12 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ - I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -937,6 +1004,8 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK 
BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; @@ -955,15 +1024,24 @@ struct i40e_filter_program_desc { /* Packet Classifier Types for filters */ enum i40e_filter_pctype { - /* Note: Values 0-30 are reserved for future use */ + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-40 are reserved for future use */ + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, @@ -1009,14 +1087,17 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ - I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) @@ -1069,6 +1150,14 @@ struct i40e_eth_stats { u64 tx_errors; /* tepc */ }; +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + #ifdef I40E_FCOE /* Statistics collected per function for FCoE */ struct i40e_fcoe_stats { @@ -1133,6 +1222,9 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; + u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; @@ -1145,6 +1237,8 @@ struct i40e_hw_port_stats { #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_BOOT_CONFIG_PTR 0x17 +#define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 2d20af290..ae8798260 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -81,7 +81,6 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 
17, - I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue @@ -110,7 +109,9 @@ struct i40e_virtchnl_msg { * error regardless of version mismatch. */ #define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 0 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + struct i40e_virtchnl_version_info { u32 major; u32 minor; @@ -129,7 +130,8 @@ struct i40e_virtchnl_version_info { */ /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * VF sends this request to PF with no parameters + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * i40e_virtchnl_vf_resource and one or more * i40e_virtchnl_vsi_resource structures. @@ -143,9 +145,14 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4e9376da0..44462b40f 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) **/ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) { - struct i40e_hw *hw = &pf->hw; - u32 reg; - - reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); - reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - i40e_flush(hw); + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); } /** @@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, } tempmap = vecmap->rxq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - linklistmap |= (1 << - (I40E_VIRTCHNL_SUPPORTED_QTYPES * - vsi_queue_id)); + linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id)); } tempmap = vecmap->txq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - linklistmap |= (1 << - (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id - + 1)); + linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id + 1)); } next_q = find_first_bit(&linklistmap, @@ -337,11 +330,23 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, reg = (vector_id) | (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | - (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); wr32(hw, reg_idx, reg); } + /* if the vf is running in polling mode and using interrupt zero, + * need to disable auto-mask on enabling zero interrupt for VFs. 
+ */ + if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) && + (vector_id == 0)) { + reg = rd32(hw, I40E_GLINT_CTL); + if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { + reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + wr32(hw, I40E_GLINT_CTL, reg); + } + } + irq_list_done: i40e_flush(hw); } @@ -531,6 +536,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) } if (type == I40E_VSI_SRIOV) { u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the @@ -541,20 +547,25 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) */ if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); + + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id, true, false); + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF MAC addr\n"); - f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + f = i40e_add_filter(vsi, brdcast, + vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); + spin_unlock_bh(&vsi->mac_filter_list_lock); } /* program mac filter */ - ret = i40e_sync_vsi_filters(vsi); + ret = i40e_sync_vsi_filters(vsi, false); if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -598,6 +609,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /* map PF queues to VF queues */ for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); + reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); total_queue_pairs++; @@ -694,6 +706,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) */ vf->num_queue_pairs = 0; vf->vf_states = 0; + clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); } /** @@ -832,10 +845,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) complete_reset: /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); - i40e_alloc_vf_res(vf); - i40e_enable_vf_mappings(vf); - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); - + if (!i40e_alloc_vf_res(vf)) { + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); @@ -864,6 +878,11 @@ void i40e_free_vfs(struct i40e_pf *pf) i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], false); + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + false); + /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank * the carpet out from underneath their feet. 
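The polling-mode fix in i40e_config_irq_link_list() above is a classic read-modify-write on a shared global register: read I40E_GLINT_CTL, set the DIS_AUTOMASK_VF0 bit only when it is not already set (so a redundant MMIO write is skipped), and write the result back. A minimal user-space model of that idiom follows; rd32()/wr32() and the offset/mask values are mocked for illustration and are not the driver's real accessors or register layout.

#include <stdint.h>
#include <stdio.h>

/* Mock register file standing in for the device BAR (illustration only). */
static uint32_t regs[256];

static uint32_t rd32(uint32_t off)             { return regs[off]; }
static void     wr32(uint32_t off, uint32_t v) { regs[off] = v; }

/* Hypothetical offset/mask for this sketch; the real values live in
 * i40e_register.h. */
#define GLINT_CTL                        0x10
#define GLINT_CTL_DIS_AUTOMASK_VF0_MASK  (1u << 1)

static void disable_vf0_automask(void)
{
	uint32_t reg = rd32(GLINT_CTL);

	/* Only write back if the bit is not already set, mirroring the
	 * check added in the hunk above. */
	if (!(reg & GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
		reg |= GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
		wr32(GLINT_CTL, reg);
	}
}

int main(void)
{
	disable_vf0_automask();
	printf("GLINT_CTL = 0x%08x\n", rd32(GLINT_CTL));
	return 0;
}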
@@ -899,7 +918,7 @@ void i40e_free_vfs(struct i40e_pf *pf) for (vf_id = 0; vf_id < tmp; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } clear_bit(__I40E_VF_DISABLE, &pf->state); @@ -925,8 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { - dev_err(&pf->pdev->dev, - "Failed to enable SR-IOV, error %d.\n", ret); + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->num_alloc_vfs = 0; goto err_iov; } @@ -951,8 +969,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* VF resources get allocated during reset */ i40e_reset_vf(&vfs[i], false); - /* enable VF vplan_qtable mappings */ - i40e_enable_vf_mappings(&vfs[i]); } pf->num_alloc_vfs = num_alloc_vfs; @@ -980,17 +996,26 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int pre_existing_vfs = pci_num_vf(pdev); int err = 0; - dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); + if (test_bit(__I40E_TESTING, &pf->state)) { + dev_warn(&pdev->dev, + "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); + err = -EPERM; + goto err_out; + } + if (pre_existing_vfs && pre_existing_vfs != num_vfs) i40e_free_vfs(pf); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) goto out; if (num_vfs > pf->num_req_vfs) { + dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", + num_vfs, pf->num_req_vfs); err = -EPERM; goto err_out; } + dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); err = i40e_alloc_vfs(pf, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); @@ -1081,6 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, } } else { vf->num_valid_msgs++; + /* reset the invalid counter, if a valid message is received. */ + vf->num_invalid_msgs = 0; } aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, @@ -1116,12 +1143,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, * * called from the VF to request the API version used by the PF **/ -static int i40e_vc_get_version_msg(struct i40e_vf *vf) +static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_virtchnl_version_info info = { I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR }; + vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg; + /* VFs running the 1.0 API expect to get 1.0 back or they will cry. 
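+ * The downgrade is one-way: the PF now advertises
+ * I40E_VIRTCHNL_VERSION_MINOR (1), but when the requesting VF
+ * reported major 1 / minor 0 (VF_IS_V10), the reply minor is forced
+ * to I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS so a 1.0 VF never sees
+ * a minor number it cannot parse.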
*/ + if (VF_IS_V10(vf)) + info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, (u8 *)&info, sizeof(struct @@ -1136,7 +1167,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf) * * called from the VF to request its resources **/ -static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) +static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; @@ -1160,11 +1191,27 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) len = 0; goto err; } + if (VF_IS_V11(vf)) + vf->driver_caps = *(u32 *)msg; + else + vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + } else { + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + } + + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; @@ -1172,10 +1219,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) if (vf->lan_vsi_idx) { vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; - vfres->vsi_res[i].num_queue_pairs = - pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; - memcpy(vfres->vsi_res[i].default_mac_addr, - vf->default_lan_addr.addr, ETH_ALEN); + vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs; + /* VFs only use TC 0 */ + vfres->vsi_res[i].qset_handle + = le16_to_cpu(vsi->info.qs_handle[0]); + ether_addr_copy(vfres->vsi_res[i].default_mac_addr, + vf->default_lan_addr.addr); i++; } set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); @@ -1553,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + /* Lock once, because all function inside for loop accesses VSI's + * MAC filter list which needs to be protected using same lock. 
+ */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -1571,12 +1625,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to add VF MAC filter\n"); ret = I40E_ERR_PARAM; + spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; } } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi)) + if (i40e_sync_vsi_filters(vsi, false)) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: @@ -1621,13 +1677,15 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) i40e_del_filter(vsi, al->list[i].addr, I40E_VLAN_ANY, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi)) + if (i40e_sync_vsi_filters(vsi, false)) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: @@ -1679,6 +1737,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { /* add new VLAN filter */ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (ret) dev_err(&pf->pdev->dev, "Unable to add VF vlan filter %d, error %d\n", @@ -1730,6 +1789,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (ret) dev_err(&pf->pdev->dev, "Unable to delete VF vlan filter %d, error %d\n", @@ -1766,9 +1826,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len = sizeof(struct i40e_virtchnl_version_info); break; case I40E_VIRTCHNL_OP_RESET_VF: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: valid_len = 0; break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(vf)) + valid_len = sizeof(u32); + else + valid_len = 0; + break; case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct i40e_virtchnl_txq_info); break; @@ -1836,7 +1901,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, case I40E_VIRTCHNL_OP_UNKNOWN: default: return -EPERM; - break; } /* few more checks */ if ((valid_len != msglen) || (err_msg_format)) { @@ -1881,10 +1945,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, switch (v_opcode) { case I40E_VIRTCHNL_OP_VERSION: - ret = i40e_vc_get_version_msg(vf); + ret = i40e_vc_get_version_msg(vf, msg); break; case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: - ret = i40e_vc_get_vf_resources_msg(vf); + ret = i40e_vc_get_vf_resources_msg(vf, msg); break; case I40E_VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); @@ -1962,9 +2026,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); - if (reg & (1 << bit_idx)) { + if (reg & BIT(bit_idx)) { /* clear the bit in GLGEN_VFLRSTAT */ - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); if (!test_bit(__I40E_DOWN, &pf->state)) i40e_reset_vf(vf, true); @@ -2015,8 +2079,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } + /* Lock once because below invoked function add/del_filter requires + 
* mac_filter_list_lock to be held + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); /* Delete all the filters for this VSI - we're going to kill it @@ -2025,9 +2095,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) list_for_each_entry(f, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ - if (i40e_sync_vsi_filters(vsi)) { + if (i40e_sync_vsi_filters(vsi, false)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); ret = -EIO; goto error_param; @@ -2054,8 +2126,10 @@ error_param: int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) { + u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + bool is_vsi_in_vlan = false; struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; @@ -2081,7 +2155,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, goto error_pvid; } - if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { + if (le16_to_cpu(vsi->info.pvid) == vlanprio) + /* duplicate request, so just return success */ + goto error_pvid; + + spin_lock_bh(&vsi->mac_filter_list_lock); + is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", vf_id); @@ -2101,7 +2183,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, * MAC addresses deleted. 
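 * (vlanprio packs the 12-bit VLAN id (I40E_VLAN_MASK) and the QoS
 * bits (I40E_PRIORITY_MASK) into the same 16-bit layout as
 * vsi->info.pvid, so the pvid comparisons in this function reduce to
 * simple equality tests.)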
*/ if ((!(vlan_id || qos) || - (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && + vlanprio != le16_to_cpu(vsi->info.pvid)) && vsi->info.pvid) ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); @@ -2116,8 +2198,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, } } if (vlan_id || qos) - ret = i40e_vsi_add_pvid(vsi, - vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); + ret = i40e_vsi_add_pvid(vsi, vlanprio); else i40e_vsi_remove_pvid(vsi); @@ -2270,7 +2351,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ivi->vf = vf_id; - memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); + ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); ivi->max_tx_rate = vf->tx_rate; ivi->min_tx_rate = 0; diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 09043c1aa..da44995de 100644 --- a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -29,8 +29,6 @@ #include "i40e.h" -#define I40E_MAX_MACVLAN_FILTERS 256 -#define I40E_MAX_VLAN_FILTERS 256 #define I40E_MAX_VLANID 4095 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 @@ -42,6 +40,9 @@ #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0x7000 +#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0)) +#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1)) + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, @@ -75,6 +76,8 @@ struct i40e_vf { u16 vf_id; /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; + struct i40e_virtchnl_version_info vf_ver; + u32 driver_caps; /* reported by VF driver */ /* VF Port Extender (PE) stag if used */ u16 stag; @@ -93,7 +96,8 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ - u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ + /* num of continuous malformed or invalid msgs detected */ + u64 num_invalid_msgs; u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. 
capabilities */ diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index c1d25f8c1..3f65e39b3 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -60,17 +60,6 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw) hw->aq.arq.len = I40E_VF_ARQLEN1; hw->aq.arq.bal = I40E_VF_ARQBAL1; hw->aq.arq.bah = I40E_VF_ARQBAH1; - } else { - hw->aq.asq.tail = I40E_PF_ATQT; - hw->aq.asq.head = I40E_PF_ATQH; - hw->aq.asq.len = I40E_PF_ATQLEN; - hw->aq.asq.bal = I40E_PF_ATQBAL; - hw->aq.asq.bah = I40E_PF_ATQBAH; - hw->aq.arq.tail = I40E_PF_ARQT; - hw->aq.arq.head = I40E_PF_ARQH; - hw->aq.arq.len = I40E_PF_ARQLEN; - hw->aq.arq.bal = I40E_PF_ARQBAL; - hw->aq.arq.bah = I40E_PF_ARQBAH; } } @@ -308,7 +297,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | - I40E_PF_ATQLEN_ATQENABLE_MASK)); + I40E_VF_ATQLEN1_ATQENABLE_MASK)); wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); @@ -337,7 +326,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | - I40E_PF_ARQLEN_ARQENABLE_MASK)); + I40E_VF_ARQLEN1_ARQENABLE_MASK)); wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); @@ -384,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -402,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -443,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -461,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! 
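 * (arq.count, like asq.count above, is now set only on this success
 * path; a failed allocation no longer leaves the queue looking
 * initialized to the shutdown paths that treat count == 0 as
 * not ready.)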
*/ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -480,8 +469,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.asq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.asq.head, 0); @@ -490,16 +483,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) wr32(hw, hw->aq.asq.bal, 0); wr32(hw, hw->aq.asq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.asq_mutex); - hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_asq_bufs(hw); +shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); - return ret_code; } @@ -513,8 +503,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) { i40e_status ret_code = 0; - if (hw->aq.arq.count == 0) - return I40E_ERR_NOT_READY; + mutex_lock(&hw->aq.arq_mutex); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.arq.head, 0); @@ -523,16 +517,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); - /* make sure lock is available */ - mutex_lock(&hw->aq.arq_mutex); - hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_arq_bufs(hw); +shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); - return ret_code; } @@ -560,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* initialize locks */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -605,7 +592,8 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - /* destroy the locks */ + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); return ret_code; } @@ -628,8 +616,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "%s: ntc %d head %d.\n", __func__, ntc, - rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = @@ -693,19 +680,23 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, u16 retval = 0; u32 val = 0; - val = rd32(hw, hw->aq.asq.head); - if (val >= hw->aq.num_asq_entries) { + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: head overrun at %d\n", val); + "AQTX: Admin queue not initialized.\n"); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } - if (hw->aq.asq.count == 0) { + hw->aq.asq_last_status = I40E_AQ_RC_OK; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: Admin queue not initialized.\n"); + "AQTX: head overrun at %d\n", val); status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_exit; + goto asq_send_command_error; } details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); @@ -730,8 +721,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, desc->flags &= ~cpu_to_le16(details->flags_dis); desc->flags |= cpu_to_le16(details->flags_ena); - mutex_lock(&hw->aq.asq_mutex); - if (buff_size 
> hw->aq.asq_buf_size) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, @@ -841,6 +830,10 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + /* save writeback aq if requested */ + if (details->wb_desc) + *details->wb_desc = *desc_on_ring; + /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { @@ -852,7 +845,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); -asq_send_command_exit: return status; } @@ -898,8 +890,15 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ - ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; @@ -959,6 +958,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index ef43d68f6..a3eae5d9a 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -69,6 +69,7 @@ struct i40e_asq_cmd_details { u16 flags_dis; bool async; bool postpone; + struct i40e_aq_desc *wb_desc; }; #define I40E_ADMINQ_DETAILS(R, i) \ @@ -108,9 +109,10 @@ struct i40e_adminq_info { /** * i40e_aq_rc_to_posix - convert errors to user-land codes - * aq_rc: AdminQ error code to convert + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert **/ -static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) return -EAGAIN; - if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; + return aq_to_posix[aq_rc]; } diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e715bccfb..fcb9ef34c 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -34,8 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 -#define I40E_FW_API_VERSION_A0_MINOR 0x0000 +#define I40E_FW_API_VERSION_MINOR 0x0004 struct i40e_aq_desc { __le16 flags; @@ -133,12 +132,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, 
i40e_aqc_opc_mac_address_write = 0x0108, @@ -260,7 +254,10 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -272,8 +269,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -507,7 +502,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -530,7 +526,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -824,8 +822,12 @@ struct i40e_aqc_vsi_properties_data { I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; @@ -1066,6 +1068,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -1716,11 +1719,13 @@ struct i40e_aqc_get_link_status { u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; -#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; @@ -2093,6 +2098,46 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ 
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + /* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure_A0 { diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_common.c b/kernel/drivers/net/ethernet/intel/i40evf/i40e_common.c index 39fcb1dc4..72b1942a9 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -51,9 +51,20 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_20G_KR2: + case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: + hw->mac.type = I40E_MAC_X722_VF; + break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; @@ -72,6 +83,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) } /** + * i40evf_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40evf_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return 
"I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return 
"I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + +/** * i40evf_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask @@ -114,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -146,7 +349,7 @@ bool i40evf_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) return !!(rd32(hw, hw->aq.asq.len) & - I40E_PF_ATQLEN_ATQENABLE_MASK); + I40E_VF_ATQLEN1_ATQENABLE_MASK); else return false; } @@ -177,6 +380,164 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags 
|= cpu_to_le16((u16)
+			((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+			I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+			I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+					   u16 vsi_id,
+					   struct i40e_aqc_get_set_rss_key_data *key,
+					   bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_key);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+			I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+			I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+	status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
 /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
@@ -612,10 +973,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
 					     I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
 	for (i = 0; i < msg->num_vsis; i++) {
 		if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
-			memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
-			       ETH_ALEN);
-			memcpy(hw->mac.addr, vsi_res->default_mac_addr,
-			       ETH_ALEN);
+			ether_addr_copy(hw->mac.perm_addr,
+					vsi_res->default_mac_addr);
+			ether_addr_copy(hw->mac.addr,
+					vsi_res->default_mac_addr);
 		}
 		vsi_res++;
 	}
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_devids.h
new file mode 100644
index 000000000..e6a39c986
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710		0x1572
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_A		0x157F
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
+#define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
+#define I40E_DEV_ID_VF			0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 931c88044..00ed24bfc 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
 	struct i40e_hmc_bp bp;
 	u32 sd_index;
+	bool rsrc_pg;
 	bool valid;
 };
 
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		 I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		 I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 				    struct i40e_hmc_info *hmc_info,
-				    u32 pd_index);
+				    u32 pd_index,
+				    struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 			      struct i40e_hmc_info *hmc_info,
 			      u32 idx);
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 21a91b14b..5e314fd3c 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -34,7 +34,7 @@
 #include <linux/pci.h>
 
 /* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 
 /* File to be the magic between shared code and
  * actual OS primitives
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a44b..cbd9a1b07 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,19 @@ void i40e_idle_aq(struct i40e_hw *hw);
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 seid,
+				  struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 seid,
+				  struct i40e_aqc_get_set_rss_key_data *key);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
@@ -88,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 				u16 vsi_seid, u16 queue, bool is_add,
 				struct i40e_control_filter_stats *stats,
 				struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+						    u16 vsi_seid);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_register.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 3cc737629..10febcfd7 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -27,1580 +27,6 @@
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
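As a side note on how the new VF RSS admin-queue wrappers above are meant to be consumed: the commit funnels both the get and set paths through one shared indirect-command helper, so a caller only chooses the direction and the VSI. A minimal, hypothetical usage sketch follows; it is not part of this commit, the function name i40evf_config_rss_example and its vsi_id/lut arguments are illustrative assumptions, and netdev_rss_key_fill() is the stock kernel helper from <linux/netdevice.h>.

/* Hypothetical sketch, not part of this commit: program a random hash key
 * and a caller-supplied lookup table for one VSI through the wrappers
 * added above.
 */
static i40e_status i40evf_config_rss_example(struct i40e_hw *hw, u16 vsi_id,
					     u8 *lut, u16 lut_size)
{
	struct i40e_aqc_get_set_rss_key_data key = {};
	i40e_status status;

	/* fill the RSS key with random bytes, then push it to the VSI */
	netdev_rss_key_fill(&key, sizeof(key));
	status = i40evf_aq_set_rss_key(hw, vsi_id, &key);
	if (status)
		return status;

	/* pf_lut == false selects the per-VSI lookup table, which is the
	 * only table a VF can own */
	return i40evf_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
}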
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) -#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) -#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ -#define I40E_GL_ARQH_ARQH_SHIFT 0 -#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) -#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ -#define I40E_GL_ARQT_ARQT_SHIFT 0 -#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) -#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ -#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) -#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ -#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) -#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ -#define I40E_GL_ATQH_ATQH_SHIFT 0 -#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) -#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ -#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) -#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) -#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) -#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) -#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) -#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ -#define I40E_GL_ATQT_ATQT_SHIFT 0 -#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) -#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ -#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) -#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ -#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) -#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ -#define I40E_PF_ARQH_ARQH_SHIFT 0 -#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) -#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ -#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) -#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 -#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) -#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 -#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) -#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 -#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) -#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) -#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ -#define I40E_PF_ARQT_ARQT_SHIFT 0 -#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) -#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ -#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) 
-#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ -#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) -#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ -#define I40E_PF_ATQH_ATQH_SHIFT 0 -#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) -#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ -#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) -#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) -#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) -#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) -#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) -#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ -#define I40E_PF_ATQT_ATQT_SHIFT 0 -#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) -#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAH_MAX_INDEX 127 -#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAL_MAX_INDEX 127 -#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) -#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQH_MAX_INDEX 127 -#define I40E_VF_ARQH_ARQH_SHIFT 0 -#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) -#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQLEN_MAX_INDEX 127 -#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) -#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQT_MAX_INDEX 127 -#define I40E_VF_ARQT_ARQT_SHIFT 0 -#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) -#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAH_MAX_INDEX 127 -#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAL_MAX_INDEX 127 -#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) -#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQH_MAX_INDEX 127 -#define 
I40E_VF_ATQH_ATQH_SHIFT 0 -#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) -#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQLEN_MAX_INDEX 127 -#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) -#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQT_MAX_INDEX 127 -#define I40E_VF_ATQT_ATQT_SHIFT 0 -#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) -#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ -#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 -#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) -#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 -#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) -#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 -#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) -#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 -#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 -#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) -#define I40E_PFCM_LANCTXSTAT 
0x0010C380 /* Reset: CORER */ -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) -#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) -#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ -#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 -#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) -#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) -#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 -#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) -#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) -#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 -#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 -#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) -#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ -#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 -#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) -#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 -#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 -#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 -#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK 
I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) -#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 -#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) -#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ -#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 -#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) -#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ -#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 -#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) -#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 -#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 -#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) -#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 -#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 -#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) -#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ -#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 -#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) -#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 -#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) -#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 -#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 -#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) -#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) -#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ -#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 -#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 -#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) -#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ -#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 -#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) -#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ -#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 -#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 -#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 -#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 -#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 -#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, 
I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 -#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 -#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 -#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) -#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 -#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 -#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) -#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ -#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ -#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ -#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 -#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) -#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) -#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) -#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) -#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) -#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ -#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 -#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 -#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 -#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 -#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 -#define I40E_PRTDCB_TFCS_TXOFF3_MASK 
I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 -#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 -#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 -#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 -#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) -#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) -#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ -#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 -#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) -#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 -#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) -#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 -#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) -#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 -#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) -#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ -#define I40E_GL_FWSTS_FWS0B_SHIFT 0 -#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) -#define I40E_GL_FWSTS_FWRI_SHIFT 9 -#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) -#define I40E_GL_FWSTS_FWS1B_SHIFT 16 -#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) -#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ -#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 -#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) -#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 -#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 -#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 -#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 -#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 -#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) -#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ -#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 -#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 -#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 -#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 -#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) -#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 -#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) -#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 -#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) -#define 
I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 -#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) -#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 -#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) -#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 -#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) -#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 -#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) -#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 -#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) -#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 -#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 -#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 -#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) -#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ -#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 -#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) -#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 -#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) -#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) -#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) -#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 -#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 -#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) -#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 -#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) -#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 -#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) -#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 -#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) -#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 -#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) -#define I40E_GLGEN_I2CCMD_R_SHIFT 29 -#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) -#define I40E_GLGEN_I2CCMD_E_SHIFT 31 -#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) -#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 -#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 -#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 -#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 -#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) -#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) -#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) -#define 
I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) -#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSCA_MAX_INDEX 3 -#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 -#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) -#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 -#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) -#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 -#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) -#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 -#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 -#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) -#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 -#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) -#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) -#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSRWD_MAX_INDEX 3 -#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 -#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) -#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 -#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) -#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) -#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ -#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 -#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) -#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 -#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) -#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 -#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) -#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 -#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 -#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 -#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) -#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ -#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 -#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ -#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 -#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) -#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 -#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) -#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 -#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) -#define I40E_GLGEN_STAT 0x000B612C 
/* Reset: POR */ -#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 -#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) -#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 -#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) -#define I40E_GLGEN_STAT_VTEN_SHIFT 3 -#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) -#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 -#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) -#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 -#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) -#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 -#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) -#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 -#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 -#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) -#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ -#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 -#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) -#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ -#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 -#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) -#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ -#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 -#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) -#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ -#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 -#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) -#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ -#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 -#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) -#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 -#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) -#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 -#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) -#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 -#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) -#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ -#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 -#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) -#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ -#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 -#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) -#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 -#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 -#define 
I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) -#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 -#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 -#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) -#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 -#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 -#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) -#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 -#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 -#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) -#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 -#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 -#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) -#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 -#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 -#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) -#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) -#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) -#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 -#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 -#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) -#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 -#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 -#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) -#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ -#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 -#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) -#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) -#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) -#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) 
* 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 -#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) -#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) -#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) -#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) -#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) -#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) -#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) -#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ -#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 -#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) -#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 -#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 -#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) -#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) -#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) -#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 -#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 -#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) -#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 -#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) -#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) -#define 
I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) -#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) -#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_SDPART_MAX_INDEX 15 -#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) -#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) -#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) -#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ -#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) -#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 -#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) -#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ -#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) -#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 -#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) -#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ -#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 -#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) -#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ -#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 -#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) -#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 -#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) -#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 -#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) -#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 -#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) -#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* 
Reset: POR */ -#define I40E_GL_GP_FUSE_MAX_INDEX 28 -#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 -#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) -#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ -#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 -#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) -#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 -#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) -#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 -#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) -#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 -#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) -#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ -#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define 
I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) -#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ -#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 -#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) -#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 -#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) -#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ -#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) -#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) -#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) -#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_PFINT_CEQCTL_MAX_INDEX 511 -#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, 
I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) -#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ -#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 -#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) -#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 -#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) -#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 -#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) -#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 -#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) -#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) -#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 -#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 -#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) -#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 -#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 -#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) -#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 -#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) -#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 -#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) -#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 -#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK 
I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, 
I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) -#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ -#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 -#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 -#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 -#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 -#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 -#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 -#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) -#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 -#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) -#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 -#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) -#define I40E_PFINT_ICR0_GRST_SHIFT 20 -#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) -#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 -#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) -#define I40E_PFINT_ICR0_GPIO_SHIFT 22 -#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) -#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 -#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) -#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) -#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 -#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) -#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 -#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) -#define I40E_PFINT_ICR0_VFLR_SHIFT 29 -#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) -#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 -#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) -#define I40E_PFINT_ICR0_SWINT_SHIFT 31 -#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT) -#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ -#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 -#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) -#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 -#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) -#define 
I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 -#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) -#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 -#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) -#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 -#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) -#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 -#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) -#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) -#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 -#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) -#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 -#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) -#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 -#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) -#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 -#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) -#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ -#define I40E_PFINT_ITR0_MAX_INDEX 2 -#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) -#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_ITRN_MAX_INDEX 2 -#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) -#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ -#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 -#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) -#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 -#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 -#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) -#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ -#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) -#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) -#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_RATEN_MAX_INDEX 511 -#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) -#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 -#define 
I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) -#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_RQCTL_MAX_INDEX 1535 -#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 -#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) -#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 -#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) -#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) -#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) -#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) -#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 -#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) -#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_TQCTL_MAX_INDEX 1535 -#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 -#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) -#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 -#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) -#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) -#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) -#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) -#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 -#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) -#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 -#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, 
I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 -#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_MAX_INDEX 127 -#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_SWINT_SHIFT 31 -#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) -#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) -#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_ITR0_MAX_INDEX 2 -#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + 
((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN_MAX_INDEX 2 -#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPINT_AEQCTL_MAX_INDEX 127 -#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) -#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_VPINT_CEQCTL_MAX_INDEX 511 -#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) -#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLST0_MAX_INDEX 127 -#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 -#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 -#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 -#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_RATE0_MAX_INDEX 127 -#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 -#define 
I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) -#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) -#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_RATEN_MAX_INDEX 511 -#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) -#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) -#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 -#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) -#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ -#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 -#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) -#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) -#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) -#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ -#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 -#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 -#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 -#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) -#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ -#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 -#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) -#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 -#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) -#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) -#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QRX_ENA_MAX_INDEX 1535 -#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 -#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) -#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) -#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 -#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) -#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QRX_TAIL_MAX_INDEX 1535 -#define I40E_QRX_TAIL_TAIL_SHIFT 0 
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) -#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_CTL_MAX_INDEX 1535 -#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 -#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) -#define I40E_QTX_CTL_PF_INDX_SHIFT 2 -#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) -#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 -#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) -#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_ENA_MAX_INDEX 1535 -#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 -#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) -#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) -#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 -#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) -#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_HEAD_MAX_INDEX 1535 -#define I40E_QTX_HEAD_HEAD_SHIFT 0 -#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) -#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 -#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) -#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_TAIL_MAX_INDEX 1535 -#define I40E_QTX_TAIL_TAIL_SHIFT 0 -#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) -#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_MAPENA_MAX_INDEX 127 -#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 -#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) -#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QTABLE_MAX_INDEX 15 -#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 -#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) -#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QBASE_MAX_INDEX 383 -#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 -#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) -#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 -#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) -#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QTABLE_MAX_INDEX 7 -#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 -#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) -#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 -#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) -#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ -#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 -#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) -#define I40E_PRTGL_SAH_MFS_SHIFT 16 -#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) -#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ -#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 -#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) 
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 -#define 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) -#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ -#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 -#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) -#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) -#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 -#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) -#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) -#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) -#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 -#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 -#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) -#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ -#define 
I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 -#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 -#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 -#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 -#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) -#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 -#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 -#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) -#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 -#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) -#define 
I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 -#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) -#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_METF_MAX_INDEX 3 -#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 -#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) -#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 -#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) -#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) -#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 -#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) -#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 -#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) -#define 
I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 -#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) -#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 -#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) -#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 -#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) -#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 -#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) -#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) -#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) -#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ -#define I40E_MSIX_PBA_MAX_INDEX 5 -#define I40E_MSIX_PBA_PENBIT_SHIFT 0 -#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) -#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TADD_MAX_INDEX 128 -#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) -#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TMSG_MAX_INDEX 128 -#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TUADD_MAX_INDEX 128 -#define 
I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TVCTRL_MAX_INDEX 128 -#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) #define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ #define I40E_VFMSIX_PBA1_MAX_INDEX 19 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 @@ -1623,1525 +49,6 @@ #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 #define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) -#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ -#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 -#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) -#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 -#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) -#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 -#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) -#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 -#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) -#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 -#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) -#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 -#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) -#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 -#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) -#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 -#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) -#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 -#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) -#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 -#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) -#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ -#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 -#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) -#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 -#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) -#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ -#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 -#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) -#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 -#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) -#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 -#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) -#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 -#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) -#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 -#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) -#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ -#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) -#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ -#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 -#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) -#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 -#define 
I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
-#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
-#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
-#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
-#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
-#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
-#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
-#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC64L_MAX_INDEX 3 -#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 -#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) -#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 -#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 -#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) -#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 -#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 -#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) -#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 -#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 -#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) -#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 -#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 -#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) -#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 -#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 -#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) -#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 -#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 -#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) -#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RDPC_MAX_INDEX 3 -#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 -#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) -#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RFC_MAX_INDEX 3 -#define I40E_GLPRT_RFC_RFC_SHIFT 0 -#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) -#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RJC_MAX_INDEX 3 -#define I40E_GLPRT_RJC_RJC_SHIFT 0 -#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) -#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RLEC_MAX_INDEX 3 -#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 -#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) -#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_ROC_MAX_INDEX 3 -#define I40E_GLPRT_ROC_ROC_SHIFT 0 -#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) -#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RUC_MAX_INDEX 3 -#define I40E_GLPRT_RUC_RUC_SHIFT 0 -#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) -#define 
I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RUPP_MAX_INDEX 3 -#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 -#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) -#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 -#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 -#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) -#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDOLD_MAX_INDEX 3 -#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 -#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPRCH_MAX_INDEX 3 -#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) -#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPRCL_MAX_INDEX 3 -#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) -#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPTCH_MAX_INDEX 3 -#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 -#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) -#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPTCL_MAX_INDEX 3 -#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 -#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) -#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPRCH_MAX_INDEX 15 -#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) -#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPRCL_MAX_INDEX 15 -#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) -#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPTCH_MAX_INDEX 15 -#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) -#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPTCL_MAX_INDEX 15 -#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) -#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GORCH_MAX_INDEX 15 -#define I40E_GLSW_GORCH_GORCH_SHIFT 0 -#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) -#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GORCL_MAX_INDEX 15 -#define I40E_GLSW_GORCL_GORCL_SHIFT 0 -#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) -#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GOTCH_MAX_INDEX 15 -#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 
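
The statistics registers above come in L/H pairs: the L register at the base address holds the low 32 bits and the H register at base+4 holds the upper 16 bits of a 48-bit rolling counter, which is why the masks alternate between 0xFFFFFFFF and 0xFFFF. A sketch of reading such a pair and of the wrap-aware delta, modeled on what the driver's i40e_stat_update48() helper computes; readl() on a mapped BAR stands in for the driver's rd32() accessor:

    #include <linux/io.h>
    #include <linux/types.h>

    /* read one 48-bit counter from its L/H register pair */
    static u64 read_stat48(void __iomem *hw_addr, u32 loreg, u32 hireg)
    {
            u64 val = readl(hw_addr + loreg);                    /* low 32 bits */

            val |= (u64)(readl(hw_addr + hireg) & 0xFFFF) << 32; /* high 16 bits */
            return val;
    }

    /* counters roll over at 2^48, so wrap the subtraction accordingly */
    static u64 stat48_delta(u64 newval, u64 oldval)
    {
            return (newval - oldval) & 0xFFFFFFFFFFFFULL;
    }
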
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) -#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GOTCL_MAX_INDEX 15 -#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) -#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPRCH_MAX_INDEX 15 -#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) -#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPRCL_MAX_INDEX 15 -#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) -#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPTCH_MAX_INDEX 15 -#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) -#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPTCL_MAX_INDEX 15 -#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) -#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_RUPP_MAX_INDEX 15 -#define I40E_GLSW_RUPP_RUPP_SHIFT 0 -#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) -#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_TDPC_MAX_INDEX 15 -#define I40E_GLSW_TDPC_TDPC_SHIFT 0 -#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) -#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPRCH_MAX_INDEX 15 -#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) -#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPRCL_MAX_INDEX 15 -#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) -#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPTCH_MAX_INDEX 15 -#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 -#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) -#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPTCL_MAX_INDEX 15 -#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 -#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) -#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPRCH_MAX_INDEX 383 -#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) -#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPRCL_MAX_INDEX 383 -#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) -#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPTCH_MAX_INDEX 383 -#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLV_BPTCH_BPTCH_MASK 
I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) -#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPTCL_MAX_INDEX 383 -#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) -#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GORCH_MAX_INDEX 383 -#define I40E_GLV_GORCH_GORCH_SHIFT 0 -#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) -#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GORCL_MAX_INDEX 383 -#define I40E_GLV_GORCL_GORCL_SHIFT 0 -#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) -#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GOTCH_MAX_INDEX 383 -#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) -#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GOTCL_MAX_INDEX 383 -#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) -#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPRCH_MAX_INDEX 383 -#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) -#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPRCL_MAX_INDEX 383 -#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) -#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPTCH_MAX_INDEX 383 -#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) -#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPTCL_MAX_INDEX 383 -#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) -#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_RDPC_MAX_INDEX 383 -#define I40E_GLV_RDPC_RDPC_SHIFT 0 -#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) -#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_RUPP_MAX_INDEX 383 -#define I40E_GLV_RUPP_RUPP_SHIFT 0 -#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) -#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_TEPC_MAX_INDEX 383 -#define I40E_GLV_TEPC_TEPC_SHIFT 0 -#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) -#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPRCH_MAX_INDEX 383 -#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) -#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPRCL_MAX_INDEX 383 -#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) -#define I40E_GLV_UPTCH(_i) 
(0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPTCH_MAX_INDEX 383 -#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 -#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) -#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPTCL_MAX_INDEX 383 -#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 -#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) -#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 -#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 -#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) -#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 -#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 -#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) -#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 -#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 -#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) -#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 -#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 -#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) -#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 -#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 -#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) -#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 -#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 -#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT) -#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 -#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 -#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) -#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 -#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 -#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) -#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 -#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) -#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 -#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) -#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, 
I40E_GLVEBVL_GORCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 -#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) -#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 -#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) -#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 -#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) -#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 -#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) -#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) -#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) -#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) -#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ -#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 -#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) -#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 -#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) -#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 -#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 -#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, 
I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 -#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) -#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 -#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 -#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) -#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) -#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) -#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 -#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 -#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) -#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 -#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) -#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 -#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) -#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ -#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 -#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) -#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 -#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) -#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 -#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) -#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 -#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) -#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 -#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) -#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) -#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 -#define 
I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) -#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 -#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) -#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 -#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) -#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) -#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) -#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ -#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 -#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 -#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 -#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) -#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ -#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 -#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 -#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) -#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) -#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) -#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) -#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) -#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, 
I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) -#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ -#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 -#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) -#define I40E_GL_MDET_RX_EVENT_SHIFT 8 -#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) -#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 -#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) -#define I40E_GL_MDET_RX_VALID_SHIFT 31 -#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) -#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ -#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 -#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) -#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 -#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) -#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 -#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) -#define I40E_GL_MDET_TX_EVENT_SHIFT 25 -#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT) -#define I40E_GL_MDET_TX_VALID_SHIFT 31 -#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) -#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ -#define I40E_PF_MDET_RX_VALID_SHIFT 0 -#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) -#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ -#define I40E_PF_MDET_TX_VALID_SHIFT 0 -#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) -#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ -#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 -#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) -#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 -#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) -#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) -#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_RX_MAX_INDEX 127 -#define I40E_VP_MDET_RX_VALID_SHIFT 0 -#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) -#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_TX_MAX_INDEX 127 -#define I40E_VP_MDET_TX_VALID_SHIFT 0 -#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) -#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ -#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 -#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) -#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 -#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) -#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 -#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) -#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 -#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) -#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 -#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) -#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ -#define I40E_PFPM_APM_APME_SHIFT 0 
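
GL_MDET_TX above packs an entire malicious-driver-detection event into one word: offending queue, VF number, PF number, event code, and a valid bit. A sketch of the decode, roughly what the PF driver's MDD handler does; the shift/mask values are copied from this header and pr_info() stands in for the driver's dev_info() logging:

    #include <linux/printk.h>
    #include <linux/types.h>

    #define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
    #define I40E_GL_MDET_TX_QUEUE_SHIFT  0
    #define I40E_GL_MDET_TX_QUEUE_MASK   I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
    #define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
    #define I40E_GL_MDET_TX_VF_NUM_MASK  I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
    #define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
    #define I40E_GL_MDET_TX_PF_NUM_MASK  I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
    #define I40E_GL_MDET_TX_EVENT_SHIFT  25
    #define I40E_GL_MDET_TX_EVENT_MASK   I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
    #define I40E_GL_MDET_TX_VALID_SHIFT  31
    #define I40E_GL_MDET_TX_VALID_MASK   I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)

    static void decode_mdet_tx(u32 reg)
    {
            if (!(reg & I40E_GL_MDET_TX_VALID_MASK))
                    return; /* no event latched */

            pr_info("MDD Tx event 0x%02x on PF %u VF %u queue %u\n",
                    (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT,
                    (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT,
                    (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT,
                    (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT);
    }
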
-#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ -#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 -#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) -#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ -#define I40E_PFPM_WUFC_LNKC_SHIFT 0 -#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) -#define I40E_PFPM_WUFC_MAG_SHIFT 1 -#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_PFPM_WUFC_MNG_SHIFT 3 -#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) -#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 -#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 -#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 -#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 -#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 -#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 -#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 -#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 -#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX0_SHIFT 16 -#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) -#define I40E_PFPM_WUFC_FLX1_SHIFT 17 -#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) -#define I40E_PFPM_WUFC_FLX2_SHIFT 18 -#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) -#define I40E_PFPM_WUFC_FLX3_SHIFT 19 -#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) -#define I40E_PFPM_WUFC_FLX4_SHIFT 20 -#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) -#define I40E_PFPM_WUFC_FLX5_SHIFT 21 -#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) -#define I40E_PFPM_WUFC_FLX6_SHIFT 22 -#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) -#define I40E_PFPM_WUFC_FLX7_SHIFT 23 -#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) -#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) -#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ -#define I40E_PFPM_WUS_LNKC_SHIFT 0 -#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) -#define I40E_PFPM_WUS_MAG_SHIFT 1 -#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) -#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 -#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) -#define I40E_PFPM_WUS_MNG_SHIFT 3 -#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) -#define I40E_PFPM_WUS_FLX0_SHIFT 16 -#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, 
I40E_PFPM_WUS_FLX0_SHIFT) -#define I40E_PFPM_WUS_FLX1_SHIFT 17 -#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) -#define I40E_PFPM_WUS_FLX2_SHIFT 18 -#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) -#define I40E_PFPM_WUS_FLX3_SHIFT 19 -#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) -#define I40E_PFPM_WUS_FLX4_SHIFT 20 -#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) -#define I40E_PFPM_WUS_FLX5_SHIFT 21 -#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) -#define I40E_PFPM_WUS_FLX6_SHIFT 22 -#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) -#define I40E_PFPM_WUS_FLX7_SHIFT 23 -#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) -#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) -#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ -#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 -#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) -#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 -#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) -#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAH_MAX_INDEX 3 -#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 -#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) -#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 -#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) -#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 -#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) -#define I40E_PRTPM_SAH_AV_SHIFT 31 -#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) -#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAL_MAX_INDEX 3 -#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 -#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 #define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) @@ -3366,4 +273,64 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#endif +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define 
I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* _I40E_REGISTER_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 458fbb421..47e9a90d6 100644 --- 
a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -140,65 +140,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } -/** - * i40e_get_tx_pending - how many tx descriptors not processed - * @tx_ring: the ring of descriptors - * - * Since there is no access to the ring head register - * in XL710, we need to use our local copies - **/ -static u32 i40e_get_tx_pending(struct i40e_ring *ring) -{ - u32 head, tail; - - head = i40e_get_head(ring); - tail = readl(ring->tail); - - if (head != tail) - return (head < tail) ? - tail - head : (tail + ring->count - head); - - return 0; -} - -/** - * i40e_check_tx_hang - Is there a hang in the Tx queue - * @tx_ring: the ring of descriptors - **/ -static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) -{ - u32 tx_done = tx_ring->stats.packets; - u32 tx_done_old = tx_ring->tx_stats.tx_done_old; - u32 tx_pending = i40e_get_tx_pending(tx_ring); - bool ret = false; - - clear_check_for_tx_hang(tx_ring); - - /* Check for a hung queue, but be thorough. This verifies - * that a transmit has been completed since the previous - * check AND there is at least one packet pending. The - * ARMED bit is set to indicate a potential hang. The - * bit is cleared if a pause frame is received to remove - * false hang detection due to PFC or 802.3x frames. By - * requiring this to fail twice we avoid races with - * PFC clearing the ARMED bit and conditions where we - * run the check_tx_hang logic with a transmit completion - * pending but without time to complete it yet. - */ - if ((tx_done_old == tx_done) && tx_pending) { - /* make sure it is true for two checks in a row */ - ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, - &tx_ring->state); - } else if (tx_done_old == tx_done && - (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { - /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_done; - clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } - - return ret; -} - #define WB_STRIDE 0x3 /** @@ -304,40 +245,15 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + /* check to see if there are any non-cache aligned descriptors + * waiting to be written back, and kick the hardware to force + * them to be written back in case of napi polling + */ if (budget && !((i & WB_STRIDE) == WB_STRIDE) && !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; - else - tx_ring->arm_wb = false; - - if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { - /* schedule immediate reset if we believe we hung */ - dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" - " VSI <%d>\n" - " Tx Queue <%d>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n", - tx_ring->vsi->seid, - tx_ring->queue_index, - tx_ring->next_to_use, i); - dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_bi[i].time_stamp, jiffies); - - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - dev_info(tx_ring->dev, - "tx hang detected on queue %d, resetting adapter\n", - tx_ring->queue_index); - - tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); - - /* the adapter is about to reset, no point in enabling stuff */ - return true; - } 
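
The netdev_tx_completed_queue() call kept in the context just below is the completion half of Byte Queue Limits: the xmit path reports bytes posted, the clean path reports packets and bytes finished, and the stack uses the two to bound in-flight data per queue. A sketch of the pairing, assuming a single queue and eliding the actual descriptor-ring posting:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            /* ...post skb to the hardware descriptor ring here... */

            netdev_tx_sent_queue(txq, skb->len);   /* BQL: bytes queued */
            return NETDEV_TX_OK;
    }

    static void sketch_clean_done(struct net_device *dev,
                                  unsigned int pkts, unsigned int bytes)
    {
            /* BQL: report completions so the stack can size the queue */
            netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0),
                                      pkts, bytes);
    }
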
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), @@ -359,32 +275,51 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } } - return budget > 0; + return !!budget; } /** - * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors + * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors * @vsi: the VSI we care about * @q_vector: the vector on which to force writeback * **/ -static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { - u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ - - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), - val); + u16 flags = q_vector->tx.ring[0].flags; + + if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { + u32 val; + + if (q_vector->arm_wb_state) + return; + + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK; + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), + val); + q_vector->arm_wb_state = true; + } else { + u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), val); + } } /** * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -393,22 +328,33 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. 
*/ - bytes_per_int = rc->total_bytes / rc->itr; - switch (rc->itr) { + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) new_latency_range = I40E_LOW_LATENCY; @@ -420,61 +366,55 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: + case I40E_ULTRA_LATENCY: + default: if (bytes_per_int <= 20) - rc->latency_range = I40E_LOW_LATENCY; + new_latency_range = I40E_LOW_LATENCY; break; } + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. + */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + + rc->latency_range = new_latency_range; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) { - /* do an exponential smoothing */ - new_itr = (10 * new_itr * rc->itr) / - ((9 * new_itr) + rc->itr); - rc->itr = new_itr & I40E_MAX_ITR; - } - rc->total_bytes = 0; rc->total_packets = 0; -} -/** - * i40e_update_dynamic_itr - Adjust ITR based on bytes per int - * @q_vector: the vector to adjust - **/ -static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) -{ - u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; - struct i40e_hw *hw = &q_vector->vsi->back->hw; - u32 reg_addr; - u16 old_itr; - - reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1); - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) - wr32(hw, reg_addr, q_vector->rx.itr); - - reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1); - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) - wr32(hw, reg_addr, q_vector->tx.itr); + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } -/** +/* * i40evf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * @@ -488,6 +428,8 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!dev) return -ENOMEM; + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_bi); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) @@ -648,6 +590,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) struct device *dev = rx_ring->dev; int bi_size; + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_bi); bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) @@ -828,16 +772,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; - struct i40e_vsi *vsi = rx_ring->vsi; - u64 flags = vsi->back->flags; if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (flags & I40E_FLAG_IN_NETPOLL) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); + 
napi_gro_receive(&q_vector->napi, skb); } /** @@ -873,7 +812,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, return; /* did the hardware decode the packet and checksum? */ - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ @@ -888,25 +827,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6 = true; if (ipv4 && - (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | - (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | + BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && - rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; /* If VXLAN traffic has an outer UDPv4 checksum we need to check @@ -1003,7 +942,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_node_id(); + const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; @@ -1027,7 +966,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1063,8 +1002,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1073,6 +1012,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) cleaned_count++; if (rx_hbo || rx_sph) { int len; + if (rx_hbo) len = I40E_RX_HDR_SIZE; else @@ -1116,7 +1056,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; @@ -1126,11 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter 
indicating the - * drop? - */ continue; } @@ -1144,7 +1081,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE @@ -1156,7 +1093,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_ring->netdev->last_rx = jiffies; rx_desc->wb.qword1.status_error_len = 0; } while (likely(total_rx_packets < budget)); @@ -1206,7 +1142,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1224,7 +1160,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1242,17 +1178,14 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { rx_ring->rx_stats.non_eop_descs++; continue; } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } @@ -1266,12 +1199,11 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_ring->netdev->last_rx = jiffies; rx_desc->wb.qword1.status_error_len = 0; } while (likely(total_rx_packets < budget)); @@ -1285,6 +1217,94 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_VFINT_DYN_CTLN1 + +/** + * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) +{ + struct i40e_hw *hw = &vsi->back->hw; + bool rx = false, tx = false; + u32 rxval, txval; + int vector; + + vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); + } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); + + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; + } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + +} + /** * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it @@ -1303,7 +1323,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; - int cleaned; + int work_done = 0; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1316,43 +1336,50 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb |= ring->arm_wb; + ring->arm_wb = false; } + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow 
the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { + int cleaned; + if (ring_is_ps_enabled(ring)) cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); else cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + + work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ clean_complete &= (budget_per_ring != cleaned); } /* If work not completed, return budget and polling will return */ if (!clean_complete) { +tx_only: if (arm_wb) - i40e_force_wb(vsi, q_vector); + i40evf_force_wb(vsi, q_vector); return budget; } - /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete(napi); - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || - ITR_IS_DYNAMIC(vsi->tx_itr_setting)) - i40e_update_dynamic_itr(q_vector); - - if (!test_bit(__I40E_DOWN, &vsi->state)) - i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx); + if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + q_vector->arm_wb_state = false; + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + i40e_update_enable_itr(vsi, q_vector); return 0; } /** - * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set @@ -1363,9 +1390,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. **/ -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) +static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) { __be16 protocol = skb->protocol; u32 tx_flags = 0; @@ -1390,6 +1417,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) return -EINVAL; @@ -1408,16 +1436,14 @@ out: * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol * @hdr_len: ptr to the size of the packet header * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss, + u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -1468,12 +1494,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer - * @tx_flags: Tx flags currently set + * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct 
i40e_ring *tx_ring, u32 *cd_tunneling) @@ -1483,12 +1509,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + struct udphdr *oudph; + struct iphdr *oiph; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: + oudph = udp_hdr(skb); + oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: return; @@ -1498,18 +1529,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, this_ipv6_hdr = inner_ipv6_hdr(skb); this_tcp_hdrlen = inner_tcp_hdrlen(skb); - if (tx_flags & I40E_TX_FLAGS_IPV4) { - - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; } @@ -1521,11 +1551,20 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; if (this_ip_hdr->version == 6) { - tx_flags &= ~I40E_TX_FLAGS_IPV4; - tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags &= ~I40E_TX_FLAGS_IPV4; + *tx_flags |= I40E_TX_FLAGS_IPV6; } + if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && + (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && + (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { + oudph->check = ~csum_tcpudp_magic(oiph->saddr, + oiph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, 0); + *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); @@ -1534,12 +1573,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, } /* Enable IP checksum offloads */ - if (tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_hdr = this_ip_hdr->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; this_ip_hdr->check = 0; } else { @@ -1548,7 +1587,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the td_offset for IP header length */ *td_offset = (network_hdr_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { l4_hdr = this_ipv6_hdr->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; /* Now set the td_offset for IP header length */ @@ -1672,7 +1711,44 @@ linearize_chk_done: } /** - * i40e_tx_map - Build the Tx descriptor + * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40evf_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40evf_maybe_stop_tx(tx_ring, size); +} + +/** + * i40evf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use @@ -1681,9 +1757,9 @@ linearize_chk_done: * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); @@ -1789,9 +1865,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->queue_index), first->bytecount); - /* set the timestamp */ - first->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -1808,8 +1881,14 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - writel(i, tx_ring->tail); + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) + writel(i, tx_ring->tail); + else + prefetchw(tx_desc + 1); return; @@ -1831,44 +1910,7 @@ dma_error: } /** - * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns -EBUSY if a stop is needed, else 0 - **/ -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Memory barrier before checking head and tail */ - smp_mb(); - - /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -/** - * i40e_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40e_maybe_stop_tx(tx_ring, size); -} - -/** - * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed * @skb: send buffer * @tx_ring: ring to send buffer on * @@ -1876,8 +1918,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * there is not enough descriptors available in this ring since we need at least * one descriptor. **/ -static int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) +static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) { unsigned int f; int count = 0; @@ -1892,7 +1934,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return 0; } @@ -1918,11 +1960,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u32 td_cmd = 0; u8 hdr_len = 0; int tso; - if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; /* prepare the xmit flags */ - if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; /* obtain protocol of skb */ @@ -1937,7 +1980,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss, &cd_tunneling); if (tso < 0) @@ -1945,10 +1988,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags)) + if (i40e_chk_linearize(skb, tx_flags)) { if (skb_linearize(skb)) goto out_drop; - + tx_ring->tx_stats.tx_linearize++; + } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ @@ -1958,17 +2002,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); } i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); - i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); - - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); return NETDEV_TX_OK; diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 1e49bb1fb..ebc1bf77f 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -32,11 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 
/* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -44,6 +47,15 @@ #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) +/* 0x40 is the enable bit for interrupt rate limiting, and must be set if + * the value of the rate limit is non-zero + */ +#define INTRL_ENA BIT(6) +#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) +#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) +#define I40E_INTRL_8K 125 /* 8000 ints/sec */ +#define I40E_INTRL_62K 16 /* 62500 ints/sec */ +#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -66,17 +78,29 @@ enum i40e_dyn_idx_t { /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define i40e_pf_get_default_rss_hena(pf) \ + (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -129,15 +153,16 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM (u32)(1) -#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define I40E_TX_FLAGS_TSO (u32)(1 << 3) -#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) -#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) -#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) -#define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_CSUM BIT(0) +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -145,13 +170,13 @@ enum i40e_dyn_idx_t { struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; - unsigned long time_stamp; union { struct sk_buff *skb; void *raw_buf; }; unsigned int bytecount; unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; @@ -175,6 +200,7 @@ struct i40e_tx_queue_stats { u64 restart_queue; u64 tx_busy; u64 tx_done_old; + u64 tx_linearize; }; struct i40e_rx_queue_stats { @@ -186,8 +212,6 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_TX_DETECT_HANG, - __I40E_HANG_CHECK_ARMED, __I40E_RX_PS_ENABLED, __I40E_RX_16BYTE_DESC_ENABLED, }; @@ -198,12 +222,6 @@ enum i40e_ring_state_t { set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define clear_ring_ps_enabled(ring) \ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define check_for_tx_hang(ring) \ - test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) #define ring_is_16byte_desc_enabled(ring) \ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define set_ring_16byte_desc_enabled(ring) \ @@ -250,6 +268,10 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) + /* stats structs */ struct i40e_queue_stats stats; struct u64_stats_sync syncp; @@ -271,6 +293,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_type.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_type.h index ec9d83a93..301fe2b6d 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -33,24 +33,7 @@ #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define 
I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) +#include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) @@ -120,6 +103,8 @@ enum i40e_mac_type { I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -151,14 +136,14 @@ enum i40e_set_fc_aq_failures { }; enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1, - I40E_VSI_VMDQ2, - I40E_VSI_CTRL, - I40E_VSI_FCOE, - I40E_VSI_MIRROR, - I40E_VSI_SRIOV, - I40E_VSI_FDIR, + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, I40E_VSI_TYPE_UNKNOWN }; @@ -182,16 +167,65 @@ struct i40e_link_status { bool crc_enable; u8 pacing; u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = + 
BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; - u32 autoneg_advertised; - u32 phy_id; - u32 module_type; bool get_link_info; enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + enum i40e_aq_capabilities_phy_type phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 @@ -213,7 +247,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -269,6 +313,7 @@ struct i40e_nvm_info { bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ @@ -287,12 +332,17 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, }; /* nvm_access definition and its masks/shifts need to be accessible to @@ -311,6 +361,7 @@ enum i40e_nvmupd_state { #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 +#define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) @@ -417,6 +468,7 @@ struct i40e_ieee_app_priority_table { struct i40e_dcbx_config { u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ struct i40e_ieee_ets_config etscfg; struct i40e_ieee_ets_recommend etsrec; struct i40e_ieee_pfc_config pfc; @@ -468,6 +520,8 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_virt_mem nvm_buff; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -476,16 +530,19 @@ struct i40e_hw { u16 dcbx_status; /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; - struct i40e_dcbx_config remote_dcbx_config; + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ /* debug mask */ u32 debug_mask; + char err_str[16]; }; static inline bool i40e_is_vf(struct i40e_hw *hw) { - return hw->mac.type == I40E_MAC_VF; + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { @@ -582,19 +639,23 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED_SHIFT = 
16, /* 2 BITS */ - I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ << I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT @@ -602,8 +663,8 @@ enum i40e_rx_desc_status_bits { I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ - I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -737,8 +798,7 @@ enum i40e_rx_ptype_payload_layer { I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ - I40E_RXD_QW1_LENGTH_SPH_SHIFT) +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ @@ -914,12 +974,12 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ - I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -931,6 +991,8 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; @@ -949,15 +1011,24 @@ struct i40e_filter_program_desc { /* Packet Classifier Types for filters */ enum i40e_filter_pctype { - /* Note: Values 0-30 are reserved for future use */ + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-40 are reserved for future use */ + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. 
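
An aside on the idiom driving many of these hunks: the new i40e_buildreg_itr() helper earlier in the patch and the BIT()/BIT_ULL() mask rewrites here are two directions of the same register-field packing. A minimal userspace sketch of both; the ITR_INDX/INTERVAL shift values are assumptions matching the i40e register header, not values shown in this diff:

/* Userspace sketch of the register pack/extract idiom behind these
 * hunks.  The ITR_INDX/INTERVAL shifts are assumed, not from the diff.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)      (1U << (n))
#define BIT_ULL(n)  (1ULL << (n))

#define DYN_CTLN1_INTENA_MASK     BIT(0)
#define DYN_CTLN1_CLEARPBA_MASK   BIT(1)
#define DYN_CTLN1_ITR_INDX_SHIFT  3   /* assumed */
#define DYN_CTLN1_INTERVAL_SHIFT  5   /* assumed */

/* mirrors the new i40e_buildreg_itr(): pack ITR index + interval */
static uint32_t buildreg_itr(int type, uint16_t itr)
{
	return DYN_CTLN1_INTENA_MASK |
	       DYN_CTLN1_CLEARPBA_MASK |
	       ((uint32_t)type << DYN_CTLN1_ITR_INDX_SHIFT) |
	       ((uint32_t)itr << DYN_CTLN1_INTERVAL_SHIFT);
}

/* mirrors I40E_RXD_QW1_STATUS_MASK: a contiguous mask of the first
 * I40E_RX_DESC_STATUS_LAST (19) status bits via the BIT(n) - 1 trick */
#define RX_STATUS_LAST       19
#define RXD_QW1_STATUS_MASK  (BIT_ULL(RX_STATUS_LAST) - 1)

int main(void)
{
	uint64_t qword1 = 0x000000000004000fULL;   /* made-up descriptor */

	printf("DYN_CTLN(itr_indx=1, interval=0x19) = 0x%08x\n",
	       (unsigned)buildreg_itr(1, 0x19));
	printf("rx status bits = 0x%05x\n",
	       (unsigned)(qword1 & RXD_QW1_STATUS_MASK));
	return 0;
}
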
+ */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, @@ -1003,8 +1074,7 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ - I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) @@ -1063,6 +1133,14 @@ struct i40e_eth_stats { u64 tx_errors; /* tepc */ }; +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ @@ -1108,6 +1186,9 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; + u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; @@ -1118,6 +1199,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 59f62f0e6..9f7b279b9 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -81,7 +81,6 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, - I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue @@ -110,7 +109,9 @@ struct i40e_virtchnl_msg { * error regardless of version mismatch. */ #define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 0 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + struct i40e_virtchnl_version_info { u32 major; u32 minor; @@ -129,7 +130,8 @@ struct i40e_virtchnl_version_info { */ /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * VF sends this request to PF with no parameters + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * i40e_virtchnl_vf_resource and one or more * i40e_virtchnl_vsi_resource structures. 
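
The I40E_DEFAULT_RSS_HENA rewrite above is more than cosmetic: with PCTYPE bit numbers running up to 45, an open-coded 1 << n on a 32-bit int would be undefined behavior, which is why the old macro spelled out (u64)1 and the new one uses BIT_ULL(). A small sketch, with the PCTYPE values copied from the hunk above and the 32-bit split mirroring the two wr32() HENA writes later in this patch:

/* PCTYPE bit numbers past 31 need a 64-bit shift, hence BIT_ULL(). */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum pctype {
	PCTYPE_NONF_IPV4_UDP = 31,
	PCTYPE_NONF_IPV4_TCP = 33,
	PCTYPE_NONF_IPV6_UDP = 41,
	PCTYPE_NONF_IPV6_TCP = 43,
};

#define DEFAULT_RSS_HENA (BIT_ULL(PCTYPE_NONF_IPV4_UDP) | \
			  BIT_ULL(PCTYPE_NONF_IPV4_TCP) | \
			  BIT_ULL(PCTYPE_NONF_IPV6_UDP) | \
			  BIT_ULL(PCTYPE_NONF_IPV6_TCP))

int main(void)
{
	uint64_t hena = DEFAULT_RSS_HENA;

	/* the driver writes the 64-bit HENA as two 32-bit registers */
	printf("HENA = 0x%016llx (lo 0x%08x, hi 0x%08x)\n",
	       (unsigned long long)hena,
	       (unsigned)hena, (unsigned)(hena >> 32));
	return 0;
}
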
@@ -143,9 +145,14 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf.h b/kernel/drivers/net/ethernet/intel/i40evf/i40evf.h index 1b98c25b3..22fc3d49c 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -48,10 +48,6 @@ #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "i40evf: " -#define DPRINTK(nlevel, klevel, fmt, args...) \ - ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ - printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ - __func__ , ## args))) /* dummy struct to make common code less painful */ struct i40e_vsi { @@ -70,6 +66,7 @@ struct i40e_vsi { */ u16 rx_itr_setting; u16 tx_itr_setting; + u16 qs_handle; }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -90,7 +87,7 @@ struct i40e_vsi { #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 #define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */ +#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -101,6 +98,8 @@ struct i40e_vsi { #define MAX_RX_QUEUES 8 #define MAX_TX_QUEUES MAX_RX_QUEUES +#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) + /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. 
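
The adapter flag words redefined just below (adapter->flags and aq_required) follow the standard set/test/clear bitmask pattern: callers set bits to request work, and the watchdog drains aq_required one bit per pass, clearing each bit once its AQ message is sent. A compact model, with the flag positions taken from the diff and the helpers purely illustrative:

/* Plain u32 bitmask flag words; the driver serializes access itself. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

#define FLAG_AQ_ENABLE_QUEUES   BIT(0)
#define FLAG_AQ_DISABLE_QUEUES  BIT(1)
#define FLAG_AQ_CONFIGURE_RSS   BIT(9)
#define FLAG_AQ_GET_CONFIG      BIT(10)

int main(void)
{
	uint32_t aq_required = 0;

	/* request work: set bits for the watchdog to consume */
	aq_required |= FLAG_AQ_GET_CONFIG | FLAG_AQ_CONFIGURE_RSS;

	if (aq_required & FLAG_AQ_GET_CONFIG) {
		/* ...send the config request here... */
		aq_required &= ~FLAG_AQ_GET_CONFIG;
	}

	printf("pending work mask: 0x%x\n", aq_required);
	return 0;
}
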
*/ @@ -113,8 +112,11 @@ struct i40e_q_vector { struct i40e_ring_container tx; u32 ring_mask; u8 num_ringpairs; /* total number of ring pairs in vector */ +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 or 1 update ITR */ int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; + bool arm_wb_state; cpumask_var_t affinity_mask; }; @@ -207,33 +209,38 @@ struct i40evf_adapter { struct msix_entry *msix_entries; u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) -#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) -#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) -#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3) -#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4) -#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) -#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) -#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) -#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8) -#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9) -#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10) -/* duplcates for common code */ +#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) +#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) +#define I40EVF_FLAG_RX_PS_ENABLED BIT(3) +#define I40EVF_FLAG_IMIR_ENABLED BIT(5) +#define I40EVF_FLAG_MQ_CAPABLE BIT(6) +#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) +#define I40EVF_FLAG_RESET_PENDING BIT(9) +#define I40EVF_FLAG_RESET_NEEDED BIT(10) +#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) +#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) +#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) +/* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED +#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE +#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE /* flags for admin queue service task */ u32 aq_required; -#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) -#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) -#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) -#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) -#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) -#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) -#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) -#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) -#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) +#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) +#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) +#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) +#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) +#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) +#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) +#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) +#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) +#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) +#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) /* OS defined structs */ struct net_device *netdev; @@ -249,8 +256,17 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_virtchnl_ops current_op; +#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_IWARP) +#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_VLAN) struct i40e_virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct i40e_virtchnl_version_info pf_version; +#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ + ((_a)->pf_version.minor == 1)) u16 msg_enable; struct i40e_eth_stats current_stats; struct i40e_vsi vsi; @@ -264,7 +280,7 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); -void i40evf_reinit_locked(struct i40evf_adapter *adapter); +int i40evf_process_config(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index f4e77665b..4790437a5 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -267,8 +267,10 @@ static int i40evf_set_ringparam(struct net_device *netdev, adapter->tx_desc_count = new_tx_count; adapter->rx_desc_count = new_rx_count; - if (netif_running(netdev)) - i40evf_reinit_locked(adapter); + if (netif_running(netdev)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } return 0; } @@ -379,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -395,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, break; case TCP_V6_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -477,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: return -EINVAL; @@ -489,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: return -EINVAL; @@ -501,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena &= 
~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; default: return -EINVAL; @@ -515,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; @@ -533,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -542,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case IPV6_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 7c53aca4b..99d2cffae 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,10 +34,10 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.25" +#define DRV_VERSION "1.3.33" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = - "Copyright (c) 2013 - 2014 Intel Corporation."; + "Copyright (c) 2013 - 2015 Intel Corporation."; /* i40evf_pci_tbl - PCI Device ID Table * @@ -49,6 +49,7 @@ static const char i40evf_copyright[] = */ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, /* required last entry */ {0, } }; @@ -170,7 +171,8 @@ static void i40evf_tx_timeout(struct net_device *netdev) struct i40evf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | + I40EVF_FLAG_RESET_NEEDED))) { adapter->flags |= I40EVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); } @@ -202,7 +204,7 @@ static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) wr32(hw, 
I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); + wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); /* read flush */ rd32(hw, I40E_VFGEN_RSTAT); @@ -239,11 +241,11 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) int i; for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & (1 << (i - 1))) { + if (mask & BIT(i - 1)) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK); } } } @@ -261,17 +263,17 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); - dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & (1 << i)) { + if (mask & BIT(i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); - dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); } } @@ -280,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) /** * i40evf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure + * @flush: boolean value whether to run rd32() **/ void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) { @@ -303,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 val; - u32 ena_mask; /* handle non-queue interrupts */ - val = rd32(hw, I40E_VFINT_ICR01); - ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); - val = rd32(hw, I40E_VFINT_DYN_CTL01); - val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; + val = rd32(hw, I40E_VFINT_DYN_CTL01) | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, val); /* schedule work on the private workqueue */ @@ -332,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -355,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.ring = rx_ring; q_vector->rx.count++; q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } /** @@ -375,8 +378,9 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.ring = tx_ring; q_vector->tx.count++; q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->num_ringpairs++; - q_vector->ring_mask |= (1 << t_idx); + q_vector->ring_mask |= BIT(t_idx); } /** @@ -405,7 +409,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) /* The ideal configuration... * We have enough vectors to map one per queue. 
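
i40evf_map_rings_to_vectors(), whose '==' test is relaxed to '>=' here so that a surplus of vectors still takes the one-vector-per-queue path, otherwise spreads ring pairs across the available vectors and records each Tx ring index in ring_mask. A deliberately simplified round-robin model of that fallback:

/* Simplified model of ring-pair -> vector assignment. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

struct q_vector {
	uint32_t ring_mask;      /* which Tx rings feed this vector */
	int num_ringpairs;
};

int main(void)
{
	static struct q_vector vec[4];       /* zero-initialized */
	const int q_vectors = 4, pairs = 8;  /* more pairs than vectors */

	for (int r = 0; r < pairs; r++) {
		struct q_vector *qv = &vec[r % q_vectors];

		qv->ring_mask |= BIT(r);   /* cf. ring_mask |= BIT(t_idx) */
		qv->num_ringpairs++;
	}

	for (int v = 0; v < q_vectors; v++)
		printf("vector %d: %d pairs, mask 0x%02x\n",
		       v, vec[v].num_ringpairs, (unsigned)vec[v].ring_mask);
	return 0;
}
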
*/ - if (q_vectors == (rxr_remaining * 2)) { + if (q_vectors >= (rxr_remaining * 2)) { for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); @@ -442,6 +446,29 @@ out: return err; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * i40evf_netpoll - A Polling 'interrupt' handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. + **/ +static void i40evf_netpoll(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &adapter->vsi.state)) + return; + + for (i = 0; i < q_vectors; i++) + i40evf_msix_clean_rings(0, adapter->q_vector[i]); +} + +#endif /** * i40evf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure @@ -487,8 +514,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector); if (err) { dev_info(&adapter->pdev->dev, - "%s: request_irq failed, error: %d\n", - __func__, err); + "Request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* assign the mask for this irq */ @@ -729,6 +755,8 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (!VLAN_ALLOWED(adapter)) + return -EIO; if (i40evf_add_vlan(adapter, vid) == NULL) return -ENOMEM; return 0; @@ -744,8 +772,11 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); - i40evf_del_vlan(adapter, vid); - return 0; + if (VLAN_ALLOWED(adapter)) { + i40evf_del_vlan(adapter, vid); + return 0; + } + return -EIO; } /** @@ -835,6 +866,15 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; + if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) + return -EPERM; + + f = i40evf_find_filter(adapter, hw->mac.addr); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + f = i40evf_add_filter(adapter, addr->sa_data); if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); @@ -854,6 +894,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + struct netdev_hw_addr *ha; int count = 50; /* add addr if not already in the filter list */ @@ -875,27 +916,27 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { - bool found = false; - - if (is_multicast_ether_addr(f->macaddr)) { - netdev_for_each_mc_addr(mca, netdev) { - if (ether_addr_equal(mca->addr, f->macaddr)) { - found = true; - break; - } - } - } else { - netdev_for_each_uc_addr(uca, netdev) { - if (ether_addr_equal(uca->addr, f->macaddr)) { - found = true; - break; - } - } - } - if (found) { - f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; - } + netdev_for_each_mc_addr(mca, netdev) + if (ether_addr_equal(mca->addr, f->macaddr)) + goto bottom_of_search_loop; + + netdev_for_each_uc_addr(uca, netdev) + if (ether_addr_equal(uca->addr, f->macaddr)) + goto bottom_of_search_loop; + + for_each_dev_addr(netdev, ha) + if (ether_addr_equal(ha->addr, f->macaddr)) + goto bottom_of_search_loop; 
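
The i40evf_set_rx_mode() rewrite here replaces the per-filter found flag with labelled gotos: a filter still present in any of the unicast, multicast, or device address lists jumps straight past the removal code, and whatever falls through is marked stale. The same shape in a standalone sketch:

/* Mark-stale filter sweep, modelled on the bottom_of_search_loop form. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct filter {
	char addr[18];
	bool remove;
};

int main(void)
{
	struct filter filters[] = {
		{ "aa:bb:cc:dd:ee:01", false },
		{ "aa:bb:cc:dd:ee:02", false },  /* stale */
	};
	const char *live[] = { "aa:bb:cc:dd:ee:01" };
	const int nf = 2, nl = 1;

	for (int i = 0; i < nf; i++) {
		for (int j = 0; j < nl; j++)
			if (!strcmp(filters[i].addr, live[j]))
				goto bottom_of_search_loop;

		/* not found in any live list: schedule deletion */
		filters[i].remove = true;

bottom_of_search_loop:
		continue;
	}

	for (int i = 0; i < nf; i++)
		printf("%s remove=%d\n", filters[i].addr, filters[i].remove);
	return 0;
}
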
+ + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) + goto bottom_of_search_loop; + + /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + +bottom_of_search_loop: + continue; } clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -1107,6 +1148,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; + if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; adapter->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -1161,7 +1204,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; - i40evf_acquire_msix_vectors(adapter, v_budget); + err = i40evf_acquire_msix_vectors(adapter, v_budget); out: adapter->netdev->real_num_tx_queues = pairs; @@ -1169,6 +1212,113 @@ out: } /** + * i40e_configure_rss_aq - Prepare for RSS using AQ commands + * @vsi: vsi structure + * @seed: RSS hash seed + **/ +static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +{ + struct i40e_aqc_get_set_rss_key_data rss_key; + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + int ret = 0, i; + u8 *rss_lut; + + if (!vsi->id) + return; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n", + adapter->current_op); + return; + } + + memset(&rss_key, 0, sizeof(rss_key)); + memcpy(&rss_key, seed, sizeof(rss_key)); + + rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL); + if (!rss_lut) + return; + + /* Populate the LUT with max no. PF queues in round robin fashion */ + for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++) + rss_lut[i] = i % adapter->num_active_queues; + + ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return; + } + + ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut, + (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4); + if (ret) + dev_err(&adapter->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); +} + +/** + * i40e_configure_rss_reg - Prepare for RSS if used + * @adapter: board private structure + * @seed: RSS hash seed + **/ +static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter, + const u8 *seed) +{ + struct i40e_hw *hw = &adapter->hw; + u32 *seed_dw = (u32 *)seed; + u32 cqueue = 0; + u32 lut = 0; + int i, j; + + /* Fill out hash function seed */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); + + /* Populate the LUT with max no. 
PF queues in round robin fashion */ + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + lut = 0; + for (j = 0; j < 4; j++) { + if (cqueue == adapter->num_active_queues) + cqueue = 0; + lut |= ((cqueue) << (8 * j)); + cqueue++; + } + wr32(hw, I40E_VFQF_HLUT(i), lut); + } + i40e_flush(hw); +} + +/** + * i40evf_configure_rss - Prepare for RSS + * @adapter: board private structure + **/ +static void i40evf_configure_rss(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + u8 seed[I40EVF_HKEY_ARRAY_SIZE]; + u64 hena; + + netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + hena = I40E_DEFAULT_RSS_HENA; + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + + if (RSS_AQ(adapter)) + i40evf_configure_rss_aq(&adapter->vsi, seed); + else + i40evf_configure_rss_reg(adapter, seed); +} + +/** * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * @@ -1310,16 +1460,16 @@ static void i40evf_watchdog_task(struct work_struct *work) struct i40evf_adapter, watchdog_task); struct i40e_hw *hw = &adapter->hw; - uint32_t rstat_val; + u32 reg_val; if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) goto restart_watchdog; if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat_val == I40E_VFR_VFACTIVE) || - (rstat_val == I40E_VFR_COMPLETED)) { + reg_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((reg_val == I40E_VFR_VFACTIVE) || + (reg_val == I40E_VFR_COMPLETED)) { /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); adapter->state = __I40EVF_STARTUP; @@ -1344,11 +1494,8 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; /* check for reset */ - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && - (rstat_val != I40E_VFR_VFACTIVE) && - (rstat_val != I40E_VFR_COMPLETED)) { + reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { adapter->state = __I40EVF_RESETTING; adapter->flags |= I40EVF_FLAG_RESET_PENDING; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); @@ -1368,6 +1515,10 @@ static void i40evf_watchdog_task(struct work_struct *work) } goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { + i40evf_send_vf_config_msg(adapter); + goto watchdog_done; + } if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { i40evf_disable_queues(adapter); @@ -1409,6 +1560,16 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { + /* This message goes straight to the firmware, not the + * PF, so we don't have to set current_op as we will + * not get a response through the ARQ. 
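
The register-based LUT fill in i40evf_configure_rss_reg() above packs four one-byte queue indices into each 32-bit HLUT register, cycling round-robin over the active queues. A userspace model; the HLUT_MAX_INDEX of 15 (64 LUT entries) is an assumption consistent with the VF register file rather than a value shown in this diff:

/* Round-robin RSS LUT fill, four queue indices per 32-bit register. */
#include <stdint.h>
#include <stdio.h>

#define HLUT_MAX_INDEX 15   /* assumed */

int main(void)
{
	const uint32_t num_active_queues = 4;
	uint32_t cqueue = 0;

	for (int i = 0; i <= HLUT_MAX_INDEX; i++) {
		uint32_t lut = 0;

		for (int j = 0; j < 4; j++) {
			if (cqueue == num_active_queues)
				cqueue = 0;
			lut |= cqueue << (8 * j);
			cqueue++;
		}
		printf("HLUT[%2d] = 0x%08x\n", i, (unsigned)lut);
	}
	return 0;
}
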
+ */ + i40evf_configure_rss(adapter); + adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; + goto watchdog_done; + } + if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); watchdog_done: @@ -1431,47 +1592,8 @@ restart_watchdog: schedule_work(&adapter->adminq_task); } -/** - * i40evf_configure_rss - Prepare for RSS - * @adapter: board private structure - **/ -static void i40evf_configure_rss(struct i40evf_adapter *adapter) -{ - u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; - struct i40e_hw *hw = &adapter->hw; - u32 cqueue = 0; - u32 lut = 0; - int i, j; - u64 hena; - - /* Hash type is configured by the PF - we just supply the key */ - netdev_rss_key_fill(rss_key, sizeof(rss_key)); - - /* Fill out hash function seed */ - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); - - /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - hena = I40E_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (cqueue == adapter->vsi_res->num_queue_pairs) - cqueue = 0; - lut |= ((cqueue) << (8 * j)); - cqueue++; - } - wr32(hw, I40E_VFQF_HLUT(i), lut); - } - i40e_flush(hw); -} - -#define I40EVF_RESET_WAIT_MS 100 -#define I40EVF_RESET_WAIT_COUNT 200 +#define I40EVF_RESET_WAIT_MS 10 +#define I40EVF_RESET_WAIT_COUNT 500 /** * i40evf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct @@ -1487,58 +1609,65 @@ static void i40evf_reset_task(struct work_struct *work) reset_task); struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; + struct i40evf_vlan_filter *vlf; struct i40evf_mac_filter *f; - uint32_t rstat_val; + u32 reg_val; int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); + i40evf_misc_irq_disable(adapter); if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { - dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); + adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; + /* Restart the AQ here. If we have been reset but didn't + * detect it, or if the PF had to reinit, our AQ will be hosed. 
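/* Illustrative sketch (not part of this patch): the watchdog above issues at
 * most one admin-queue request per pass - test a flag, fire the request,
 * clear the flag, and bail until the next tick. The flag and handler names
 * below are hypothetical stand-ins for the I40EVF_FLAG_AQ_* bits:
 */
#include <stdint.h>

#define AQ_FLAG_EXAMPLE	(1u << 0)	/* stand-in for one I40EVF_FLAG_AQ_* bit */

static void watchdog_pass(uint32_t *aq_required, void (*send_example)(void))
{
	if (*aq_required & AQ_FLAG_EXAMPLE) {
		send_example();			/* one AQ command in flight */
		*aq_required &= ~AQ_FLAG_EXAMPLE;
		return;				/* wait for the next tick */
	}
	/* ...one such block per pending-work flag, each ending in return... */
}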
+ */ + i40evf_shutdown_adminq(hw); + i40evf_init_adminq(hw); i40evf_request_reset(adapter); } + adapter->flags |= I40EVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat_val != I40E_VFR_VFACTIVE) && - (rstat_val != I40E_VFR_COMPLETED)) + reg_val = rd32(hw, I40E_VF_ARQLEN1) & + I40E_VF_ARQLEN1_ARQENABLE_MASK; + if (!reg_val) break; - msleep(I40EVF_RESET_WAIT_MS); + usleep_range(5000, 10000); } if (i == I40EVF_RESET_WAIT_COUNT) { - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } /* wait until the reset is complete and the PF is responding to us */ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat_val == I40E_VFR_VFACTIVE) || - (rstat_val == I40E_VFR_COMPLETED)) + reg_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (reg_val == I40E_VFR_VFACTIVE) break; msleep(I40EVF_RESET_WAIT_MS); } + /* extra wait to make sure minimum wait is met */ + msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { - struct i40evf_mac_filter *f, *ftmp; + struct i40evf_mac_filter *ftmp; struct i40evf_vlan_filter *fv, *fvtmp; /* reset never finished */ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", - rstat_val); + reg_val); adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_irq_disable(adapter); - i40evf_napi_disable_all(adapter); - netif_tx_disable(netdev); - netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + netif_tx_disable(netdev); + i40evf_napi_disable_all(adapter); + i40evf_irq_disable(adapter); i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1550,6 +1679,7 @@ static void i40evf_reset_task(struct work_struct *work) list_del(&f->list); kfree(f); } + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { list_del(&fv->list); @@ -1564,22 +1694,27 @@ static void i40evf_reset_task(struct work_struct *work) i40evf_shutdown_adminq(hw); adapter->netdev->flags &= ~IFF_UP; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); return; /* Do not attempt to reinit. It's dead, Jim. 
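/* Illustrative sketch (not part of this patch): both reset waits above are
 * bounded polling loops - first until the ARQ-enable bit drops (reset has
 * started), then until VFGEN_RSTAT reports VFACTIVE (reset has finished).
 * The common shape, in portable C:
 */
#include <time.h>

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

static int poll_until(int (*done)(void *ctx), void *ctx, int tries, long delay_ms)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (done(ctx))
			return 0;	/* condition met */
		sleep_ms(delay_ms);
	}
	return -1;			/* timed out; caller decides what next */
}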
*/ } continue_reset: - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - - i40evf_irq_disable(adapter); - if (netif_running(adapter->netdev)) { - i40evf_napi_disable_all(adapter); - netif_tx_disable(netdev); - netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + i40evf_napi_disable_all(adapter); } + i40evf_irq_disable(adapter); adapter->state = __I40EVF_RESETTING; + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + + /* free the Tx/Rx rings and descriptors, might be better to just + * re-use them sometime in the future + */ + i40evf_free_all_rx_resources(adapter); + i40evf_free_all_tx_resources(adapter); /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) @@ -1590,19 +1725,21 @@ continue_reset: dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); - i40evf_map_queues(adapter); + adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } /* re-add all VLAN filters */ - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - f->add = true; + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->add = true; } - adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + i40evf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1624,7 +1761,10 @@ continue_reset: goto reset_err; i40evf_irq_enable(adapter, true); + } else { + adapter->state = __I40EVF_DOWN; } + return; reset_err: dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); @@ -1667,41 +1807,47 @@ static void i40evf_adminq_task(struct work_struct *work) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); } while (pending); + if ((adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || + adapter->state == __I40EVF_RESETTING) + goto freedom; + /* check for error indications */ val = rd32(hw, hw->aq.arq.len); oldval = val; - if (val & I40E_VF_ARQLEN_ARQVFE_MASK) { + if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); - val &= ~I40E_VF_ARQLEN_ARQVFE_MASK; + val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; } - if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) { + if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); - val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK; + val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; } - if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) { + if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); - val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK; + val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.arq.len, val); val = rd32(hw, hw->aq.asq.len); oldval = val; - if (val & I40E_VF_ATQLEN_ATQVFE_MASK) { + if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); - val &= ~I40E_VF_ATQLEN_ATQVFE_MASK; + val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; } - if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) { + if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); - val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK; + val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; } - if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) { + if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ASQ Critical 
Error detected\n"); - val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK; + val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.asq.len, val); +freedom: kfree(event.msg_buf); out: /* re-enable Admin queue interrupt cause */ @@ -1743,8 +1889,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) if (!err) continue; dev_err(&adapter->pdev->dev, - "%s: Allocation for Tx Queue %u failed\n", - __func__, i); + "Allocation for Tx Queue %u failed\n", i); break; } @@ -1771,8 +1916,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) if (!err) continue; dev_err(&adapter->pdev->dev, - "%s: Allocation for Rx Queue %u failed\n", - __func__, i); + "Allocation for Rx Queue %u failed\n", i); break; } return err; @@ -1832,6 +1976,7 @@ static int i40evf_open(struct net_device *netdev) if (err) goto err_req_irq; + i40evf_add_filter(adapter, adapter->hw.mac.addr); i40evf_configure(adapter); err = i40evf_up_complete(adapter); @@ -1897,47 +2042,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev) } /** - * i40evf_reinit_locked - Software reinit - * @adapter: board private structure - * - * Reinititalizes the ring structures in response to a software configuration - * change. Roughly the same as close followed by open, but skips releasing - * and reallocating the interrupts. - **/ -void i40evf_reinit_locked(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - WARN_ON(in_interrupt()); - - i40evf_down(adapter); - - /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); - if (err) - goto err_reinit; - - /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); - if (err) - goto err_reinit; - - i40evf_configure(adapter); - - err = i40evf_up_complete(adapter); - if (err) - goto err_reinit; - - i40evf_irq_enable(adapter, true); - return; - -err_reinit: - dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(netdev); -} - -/** * i40evf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -1952,9 +2056,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) return -EINVAL; - /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - i40evf_reinit_locked(adapter); + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + return 0; } @@ -1970,6 +2075,9 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_tx_timeout = i40evf_tx_timeout, .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40evf_netpoll, +#endif }; /** @@ -1995,6 +2103,66 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) } /** + * i40evf_process_config - Process the config information we got from the PF + * @adapter: board private structure + * + * Verify that we have a valid config struct, and set up our netdev features + * and our VSI struct. 
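/* Illustrative sketch (not part of this patch): the ARQ/ASQ handling above is
 * a read-check-clear pattern - report each error bit, strip it from the
 * value, and write the register back only if anything actually changed:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t note_and_clear(uint32_t val, uint32_t err_mask, const char *what)
{
	if (val & err_mask) {
		printf("%s error detected\n", what);
		val &= ~err_mask;
	}
	return val;
}

/* usage: val = note_and_clear(val, vfe_mask, "ARQ VF");
 *        val = note_and_clear(val, ovfl_mask, "ARQ Overflow");
 *        if (val != oldval) write val back to the length register */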
+ **/ +int i40evf_process_config(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + /* got VF config message back from PF, now we can parse it */ + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (adapter->vf_res->vf_offload_flags + & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { + netdev->vlan_features = netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + netdev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_GRO; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features &= ~NETIF_F_RXCSUM; + + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); + adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); + adapter->vsi.netdev = adapter->netdev; + adapter->vsi.qs_handle = adapter->vsi_res->qset_handle; + return 0; +} + +/** * i40evf_init_task - worker thread to perform delayed initialization * @work: pointer to work_struct containing our data * @@ -2012,10 +2180,9 @@ static void i40evf_init_task(struct work_struct *work) struct i40evf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40evf_mac_filter *f; struct i40e_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - int i, err, bufsz; + int err, bufsz; switch (adapter->state) { case __I40EVF_STARTUP: @@ -2066,6 +2233,12 @@ static void i40evf_init_task(struct work_struct *work) if (err) { if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) err = i40evf_send_api_ver(adapter); + else + dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", + adapter->pf_version.major, + adapter->pf_version.minor, + I40E_VIRTCHNL_VERSION_MAJOR, + I40E_VIRTCHNL_VERSION_MINOR); goto err; } err = i40evf_send_vf_config_msg(adapter); @@ -2101,60 +2274,26 @@ static void i40evf_init_task(struct work_struct *work) default: goto err_alloc; } - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - adapter->vsi_res = &adapter->vf_res->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&pdev->dev, "No LAN VSI found\n"); + if (i40evf_process_config(adapter)) goto err_alloc; - } + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - netdev->features |= NETIF_F_HIGHDMA | - NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_GRO; - - if (adapter->vf_res->vf_offload_flags - & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features; - netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - 
NETIF_F_HW_VLAN_CTAG_FILTER; - } - - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - netdev->hw_features &= ~NETIF_F_RXCSUM; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", adapter->hw.mac.addr); - random_ether_addr(adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - goto err_sw_init; - - ether_addr_copy(f->macaddr, adapter->hw.mac.addr); - f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - - list_add(&f->list, &adapter->mac_filter_list); init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &i40evf_watchdog_timer; @@ -2170,24 +2309,17 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); - i40evf_configure_rss(adapter); + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + if (!RSS_AQ(adapter)) + i40evf_configure_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; netif_carrier_off(netdev); - adapter->vsi.id = adapter->vsi_res->vsi_id; - adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; - adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | - ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); - adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | - ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); - adapter->vsi.netdev = adapter->netdev; - if (!adapter->netdev_registered) { err = register_netdev(netdev); if (err) @@ -2206,10 +2338,16 @@ static void i40evf_init_task(struct work_struct *work) adapter->state = __I40EVF_DOWN; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); + + if (RSS_AQ(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + } else { + i40evf_configure_rss(adapter); + } return; restart: - schedule_delayed_work(&adapter->init_task, - msecs_to_jiffies(50)); + schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; err_register: @@ -2222,11 +2360,14 @@ err_alloc: err: /* Things went into the weeds, so try again later */ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { - dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n"); + dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - return; /* do not reschedule */ + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + schedule_delayed_work(&adapter->init_task, HZ * 5); + return; } - schedule_delayed_work(&adapter->init_task, HZ * 3); + schedule_delayed_work(&adapter->init_task, HZ); } /** @@ -2315,7 +2456,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; adapter->state = __I40EVF_STARTUP; /* 
Call save state here because it relies on the adapter struct. */ @@ -2335,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); @@ -2342,7 +2489,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); - schedule_delayed_work(&adapter->init_task, 10); + schedule_delayed_work(&adapter->init_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); return 0; @@ -2418,6 +2566,7 @@ static int i40evf_resume(struct pci_dev *pdev) rtnl_lock(); err = i40evf_set_interrupt_capability(adapter); if (err) { + rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } @@ -2486,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev) if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); + /* destroy the locks only once, here */ + mutex_destroy(&hw->aq.arq_mutex); + mutex_destroy(&hw->aq.asq_mutex); + iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 61e090558..32e620e1e 100644 --- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) - dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", - op, err, hw->aq.asq_last_status); + dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", + op, i40evf_stat_str(hw, err), + i40evf_aq_str(hw, hw->aq.asq_last_status)); return err; } @@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; - if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) || - (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR)) + adapter->pf_version = *pf_vvi; + + if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || + ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && + (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) err = -EIO; out_alloc: @@ -145,8 +149,25 @@ out: **/ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) { - return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + u32 caps; + + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN | + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + if (PF_IS_V11(adapter)) + return i40evf_send_pf_msg(adapter, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); + else + return i40evf_send_pf_msg(adapter, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** @@ -214,8 +235,8 @@ void i40evf_configure_queues(struct 
i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; @@ -268,13 +289,13 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, @@ -293,13 +314,13 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, @@ -321,8 +342,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; @@ -373,8 +394,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); return; } list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -390,8 +411,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_ether_addr_list) + (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -433,8 +453,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - 
__func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", + adapter->current_op); return; } list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -450,8 +470,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_ether_addr_list) + (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -493,8 +512,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); return; } @@ -511,8 +530,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_vlan_filter_list) + (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -552,8 +570,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); return; } @@ -570,8 +588,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_vlan_filter_list) + (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -609,8 +626,8 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "%s: command %d pending\n", - __func__, adapter->current_op); + dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", + adapter->current_op); return; } adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -700,16 +717,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; default: - dev_err(&adapter->pdev->dev, - "%s: Unknown event %d from pf\n", - __func__, vpe->event); + dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", + vpe->event); break; } return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", - __func__, v_retval, v_opcode); + dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", + v_retval, i40evf_stat_str(&adapter->hw, v_retval), + v_opcode); } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { @@ -729,6 +746,17 @@ void 
i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->current_stats = *stats; } break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: { + u16 len = sizeof(struct i40e_virtchnl_vf_resource) + + I40E_MAX_VF_VSI * + sizeof(struct i40e_virtchnl_vsi_resource); + memcpy(adapter->vf_res, msg, min(msglen, len)); + i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + i40evf_process_config(adapter); + } + break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ i40evf_irq_enable(adapter, true); @@ -740,7 +768,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, i40evf_free_all_rx_resources(adapter); break; case I40E_VIRTCHNL_OP_VERSION: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: /* Don't display an error if we get these out of sequence. * If the firmware needed to get kicked, we'll get these and diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c b/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c index 0f69ef817..7a73510e5 100644 --- a/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,5 +1,5 @@ /* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. + * Copyright(c) 2007-2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (ret_val) return ret_val; - /* reset page to 0 */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); - if (ret_val) - return ret_val; if (data & E1000_M88E1112_STATUS_LINK) port = E1000_MEDIA_PORT_OTHER; @@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (port && (hw->dev_spec._82575.media_port != port)) { hw->dev_spec._82575.media_port = port; hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); } else { - ret_val = igb_check_for_link_82575(hw); + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; } return 0; @@ -223,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) /* Verify phy id and set remaining function pointers */ switch (phy->id) { case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1111_I_PHY_ID: @@ -235,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) else phy->ops.get_cable_length = igb_get_cable_length_m88; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - /* Check if this PHY is confgured for media swap. */ + /* Check if this PHY is configured for media swap. 
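/* Illustrative sketch (not part of this patch): the GET_VF_RESOURCES handler
 * above clamps the copy to min(msglen, len), so neither a short nor an
 * oversized PF reply can overrun the local vf_res buffer:
 */
#include <stddef.h>
#include <string.h>

static void copy_clamped(void *dst, size_t dst_len, const void *src, size_t src_len)
{
	size_t n = src_len < dst_len ? src_len : dst_len;

	memcpy(dst, src, n);	/* never more than either side allows */
}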
*/ if (phy->id == M88E1112_E_PHY_ID) { u16 data; @@ -258,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->mac.ops.check_for_link = igb_check_for_link_media_swap; } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; @@ -889,6 +903,7 @@ out: **/ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { + struct e1000_phy_info *phy = &hw->phy; s32 ret_val; /* This isn't a true "hard" reset, but is the only reset @@ -905,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) goto out; ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); out: return ret_val; } @@ -1579,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); break; @@ -1900,8 +1920,8 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable * @hw: pointer to the HW structure * - * After rx enable if managability is enabled then there is likely some - * bad data at the start of the fifo and possibly in the DMA fifo. This + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This * function clears the fifos and flushes any packets that came in as rx was * being enabled. **/ @@ -1910,6 +1930,11 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw) u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; int i, ms_wait; + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + if (hw->mac.type != e1000_82575 || !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) return; @@ -1937,7 +1962,6 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw) * incoming packets are rejected. Set enable and wait 2ms so that * any packet that was coming in as RCTL.EN was set is flushed */ - rfctl = rd32(E1000_RFCTL); wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); rlpml = rd32(E1000_RLPML); @@ -2617,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) u16 phy_data; if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1543_E_PHY_ID)) + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) goto out; if (!hw->dev_spec._82575.eee_disable) { @@ -2697,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) /* Check if EEE is supported on this device. 
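/* Illustrative sketch (not part of this patch): these Marvell PHYs bank their
 * registers behind a page-select register (0x16, E1000_M88E1112_PAGE_ADDR),
 * which is why the media-swap link check above is careful to drop back to
 * page 0 when done. The access pattern, with caller-supplied read/write ops:
 */
#include <stdint.h>

#define PHY_PAGE_SELECT	0x16	/* matches E1000_M88E1112_PAGE_ADDR */

static int paged_phy_read(int (*wr)(uint32_t reg, uint16_t val),
			  int (*rd)(uint32_t reg, uint16_t *val),
			  uint16_t page, uint32_t reg, uint16_t *data)
{
	int ret = wr(PHY_PAGE_SELECT, page);	/* switch register pages */

	if (ret)
		return ret;
	ret = rd(reg, data);
	if (ret)
		return ret;
	return wr(PHY_PAGE_SELECT, 0);		/* always restore page 0 */
}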
*/ if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1543_E_PHY_ID)) + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) goto out; ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_defines.h b/kernel/drivers/net/ethernet/intel/igb/e1000_defines.h index 217f81388..b1915043b 100644 --- a/kernel/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/kernel/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -344,7 +344,8 @@ #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ -#define E1000_RFCTL_LEF 0x00040000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ #define E1000_COLLISION_THRESHOLD 15 @@ -603,6 +604,10 @@ #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 #define E1000_M88E1112_PAGE_ADDR 0x16 #define E1000_M88E1112_STATUS 0x01 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 /* PCI Express Control */ #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 @@ -860,6 +865,7 @@ #define M88_VENDOR 0x0141 #define I210_I_PHY_ID 0x01410C00 #define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_phy.c b/kernel/drivers/net/ethernet/intel/igb/e1000_phy.c index c1bb64d83..23ec28f43 100644 --- a/kernel/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/kernel/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,5 +1,5 @@ /* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. + * Copyright(c) 2007-2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; -#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_m88_cable_length_table) / \ - sizeof(e1000_m88_cable_length_table[0])) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, @@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = { 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; -#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_igp_2_cable_length_table) / \ - sizeof(e1000_igp_2_cable_length_table[0])) /** * igb_check_reset_block - Check if PHY reset is blocked @@ -1268,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) switch (hw->phy.id) { case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: reset_dsp = false; break; @@ -1276,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) reset_dsp = false; break; } - if (!reset_dsp) + if (!reset_dsp) { hw_dbg("Link taking longer than expected.\n"); - else { + } else { /* We didn't get link. * Reset the DSP and cross our fingers. 
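/* Illustrative sketch (not part of this patch): the deleted *_TABLE_SIZE
 * macros above are the classic element-count idiom, which the kernel already
 * provides as ARRAY_SIZE() - the replacement used in the hunks below. In
 * portable C it reduces to:
 */
#include <stddef.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const unsigned short cable_len[] = { 0, 50, 80, 110, 140 };
static const size_t cable_len_entries = ARRAY_SIZE(cable_len);	/* == 5 */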
*/ @@ -1303,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (hw->phy.type != e1000_phy_m88 || hw->phy.id == I347AT4_E_PHY_ID || hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1543_E_PHY_ID || + hw->phy.id == M88E1512_E_PHY_ID || hw->phy.id == I210_I_PHY_ID) goto out; @@ -1700,7 +1698,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1743,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) phy->cable_length = phy_data / (is_cm ? 100 : 1); break; case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, @@ -1796,7 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1840,7 +1839,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; - u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { IGP02E1000_PHY_AGC_A, IGP02E1000_PHY_AGC_B, @@ -1863,7 +1862,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) IGP02E1000_AGC_LENGTH_MASK; /* Array index bound check. */ - if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || (cur_agc_index == 0)) { ret_val = -E1000_ERR_PHY; goto out; @@ -2195,6 +2194,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) } /** + * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvel 1512 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. 
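/* Illustrative sketch (not part of this patch): the igb_initialize_M88E1512_phy()
 * routine being added here is a fixed script of paged register writes, each
 * checked before moving on. One way such a sequence can be expressed, with a
 * caller-supplied write op:
 */
#include <stdint.h>

struct phy_step {
	uint32_t reg;
	uint16_t val;
};

static int run_phy_script(int (*phy_write)(uint32_t reg, uint16_t val),
			  const struct phy_step *steps, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = phy_write(steps[i].reg, steps[i].val);
		if (ret)
			return ret;	/* bail on the first failed write */
	}
	return 0;
}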
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** * igb_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure * diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_phy.h b/kernel/drivers/net/ethernet/intel/igb/e1000_phy.h index 7af4ffab0..24d55edbb 100644 --- a/kernel/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/kernel/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -61,6 +61,7 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, void igb_power_up_phy_copper(struct e1000_hw *hw); void igb_power_down_phy_copper(struct e1000_hw *hw); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h b/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h index 6f0490d0e..4af2870e4 100644 --- a/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -104,6 +104,8 @@ #define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ #define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ #define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ #define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ #define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ #define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ diff --git a/kernel/drivers/net/ethernet/intel/igb/igb.h b/kernel/drivers/net/ethernet/intel/igb/igb.h index 212d668da..1a2f1cc44 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb.h +++ b/kernel/drivers/net/ethernet/intel/igb/igb.h @@ -444,8 +444,8 @@ struct igb_adapter { struct ptp_pin_desc sdp_config[IGB_N_SDP]; struct { - struct timespec start; - struct timespec period; + struct timespec64 start; + struct timespec64 period; } perout[IGB_N_PEROUT]; char fw_version[32]; diff --git a/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c b/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c index 0afc0913e..2529bc625 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -842,10 +842,6 @@ static void igb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IGB_STATS_LEN; - drvinfo->testinfo_len = IGB_TEST_LEN; - drvinfo->regdump_len = igb_get_regs_len(netdev); - drvinfo->eedump_len = igb_get_eeprom_len(netdev); } static void igb_get_ringparam(struct net_device *netdev, @@ -2159,6 +2155,27 @@ static int 
igb_set_coalesce(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); int i; + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 3) && (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || @@ -2396,10 +2413,6 @@ static int igb_get_ts_info(struct net_device *dev, info->rx_filters |= (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; diff --git a/kernel/drivers/net/ethernet/intel/igb/igb_main.c b/kernel/drivers/net/ethernet/intel/igb/igb_main.c index 4f6bf9968..ea7b09887 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb_main.c +++ b/kernel/drivers/net/ethernet/intel/igb/igb_main.c @@ -57,8 +57,8 @@ #include "igb.h" #define MAJ 5 -#define MIN 2 -#define BUILD 15 +#define MIN 3 +#define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA */ static int igb_poll(struct napi_struct *, int); static bool igb_clean_tx_irq(struct igb_q_vector *); -static bool igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); #endif #ifdef CONFIG_PM @@ -1840,31 +1842,19 @@ void igb_reinit_locked(struct igb_adapter *adapter) * * @adapter: adapter struct **/ -static s32 igb_enable_mas(struct igb_adapter *adapter) +static void igb_enable_mas(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 connsw; - s32 ret_val = 0; - - connsw = rd32(E1000_CONNSW); - if (!(hw->phy.media_type == e1000_media_type_copper)) - return ret_val; + u32 connsw = rd32(E1000_CONNSW); /* configure for SerDes media detect */ - if (!(connsw & E1000_CONNSW_SERDESD)) { + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { connsw |= E1000_CONNSW_ENRGSRC; connsw |= E1000_CONNSW_AUTOSENSE_EN; wr32(E1000_CONNSW, connsw); wrfl(); - } else if (connsw & E1000_CONNSW_SERDESD) { - /* already SerDes, no need to enable anything */ - return ret_val; - } else { - netdev_info(adapter->netdev, - "MAS: Unable to configure feature, disabling..\n"); - 
adapter->flags &= ~IGB_FLAG_MAS_ENABLE; } - return ret_val; } void igb_reset(struct igb_adapter *adapter) @@ -1984,10 +1974,9 @@ void igb_reset(struct igb_adapter *adapter) adapter->ei.get_invariants(hw); adapter->flags &= ~IGB_FLAG_MEDIA_RESET; } - if (adapter->flags & IGB_FLAG_MAS_ENABLE) { - if (igb_enable_mas(adapter)) - dev_err(&pdev->dev, - "Error enabling Media Auto Sense\n"); + if ((mac->type == e1000_82575) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); } if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); @@ -2662,7 +2651,11 @@ err_eeprom: if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif pci_iounmap(pdev, hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -2822,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev) */ igb_release_hw_control(adapter); - unregister_netdev(netdev); - - igb_clear_interrupt_scheme(adapter); - #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif + unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); @@ -2864,7 +2857,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter) return; pci_sriov_set_totalvfs(pdev, 7); - igb_pci_enable_sriov(pdev, max_vfs); + igb_enable_sriov(pdev, max_vfs); #endif /* CONFIG_PCI_IOV */ } @@ -2993,6 +2986,11 @@ static int igb_sw_init(struct igb_adapter *adapter) } #endif /* CONFIG_PCI_IOV */ + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + + igb_probe_vfs(adapter); + igb_init_queue_configuration(adapter); /* Setup and initialize a copy of the hw vlan table array */ @@ -3005,8 +3003,6 @@ static int igb_sw_init(struct igb_adapter *adapter) return -ENOMEM; } - igb_probe_vfs(adapter); - /* Explicitly disable IRQ since the NIC can be in any state. 
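/* Illustrative sketch (not part of this patch): the reworked igb_enable_mas()
 * above folds the old three-way branch into one guarded read-modify-write of
 * CONNSW. The bit positions below are placeholders, not the real
 * E1000_CONNSW layout:
 */
#include <stdbool.h>
#include <stdint.h>

#define SERDESD		(1u << 0)	/* placeholder for E1000_CONNSW_SERDESD */
#define ENRGSRC		(1u << 1)	/* placeholder for E1000_CONNSW_ENRGSRC */
#define AUTOSENSE_EN	(1u << 2)	/* placeholder for E1000_CONNSW_AUTOSENSE_EN */

static uint32_t maybe_enable_autosense(uint32_t connsw, bool is_copper)
{
	/* arm auto-sense only on copper, and only if not already on SerDes */
	if (is_copper && !(connsw & SERDESD))
		connsw |= ENRGSRC | AUTOSENSE_EN;
	return connsw;
}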
*/ igb_irq_disable(adapter); @@ -5001,6 +4997,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_tx_buffer *first; int tso; u32 tx_flags = 0; + unsigned short f; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); u8 hdr_len = 0; @@ -5011,14 +5008,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, * + 1 desc for context descriptor, * otherwise try next time */ - if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { - unsigned short f; - - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - } else { - count += skb_shinfo(skb)->nr_frags; - } + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -5401,7 +5392,7 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ptp_clock_event event; - struct timespec ts; + struct timespec64 ts; u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); if (tsicr & TSINTR_SYS_WRAP) { @@ -5421,10 +5412,11 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) if (tsicr & TSINTR_TT0) { spin_lock(&adapter->tmreg_lock); - ts = timespec_add(adapter->perout[0].start, - adapter->perout[0].period); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + /* u32 conversion of tv_sec is safe until y2106 */ wr32(E1000_TRGTTIML0, ts.tv_nsec); - wr32(E1000_TRGTTIMH0, ts.tv_sec); + wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); tsauxc = rd32(E1000_TSAUXC); tsauxc |= TSAUXC_EN_TT0; wr32(E1000_TSAUXC, tsauxc); @@ -5435,10 +5427,10 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) if (tsicr & TSINTR_TT1) { spin_lock(&adapter->tmreg_lock); - ts = timespec_add(adapter->perout[1].start, - adapter->perout[1].period); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); wr32(E1000_TRGTTIML1, ts.tv_nsec); - wr32(E1000_TRGTTIMH1, ts.tv_sec); + wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); tsauxc = rd32(E1000_TSAUXC); tsauxc |= TSAUXC_EN_TT1; wr32(E1000_TSAUXC, tsauxc); @@ -6372,6 +6364,7 @@ static int igb_poll(struct napi_struct *napi, int budget) struct igb_q_vector, napi); bool clean_complete = true; + int work_done = 0; #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) @@ -6380,15 +6373,19 @@ static int igb_poll(struct napi_struct *napi, int budget) if (q_vector->tx.ring) clean_complete = igb_clean_tx_irq(q_vector); - if (q_vector->rx.ring) - clean_complete &= igb_clean_rx_irq(q_vector, budget); + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + clean_complete &= (cleaned < budget); + } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* If not enough Rx work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); igb_ring_irq_enable(q_vector); return 0; @@ -6651,22 +6648,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif + unsigned int pull_len; - if ((size <= IGB_RX_HDR_LEN) && 
!skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - va += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; - } + if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } + if (likely(size <= IGB_RX_HDR_LEN)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as-is */ @@ -6678,8 +6678,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); return igb_can_reuse_rx_page(rx_buffer, page, truesize); } @@ -6821,62 +6834,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, } /** - * igb_pull_tail - igb specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being adjusted - * - * This function is an igb specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ -static void igb_pull_tail(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - /* retrieve timestamp from buffer */ - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - - /* update pointers to remove timestamp header */ - skb_frag_size_sub(frag, IGB_TS_HDR_LEN); - frag->page_offset += IGB_TS_HDR_LEN; - skb->data_len -= IGB_TS_HDR_LEN; - skb->len -= IGB_TS_HDR_LEN; - - /* move va to start of packet data */ - va += IGB_TS_HDR_LEN; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
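/* Illustrative sketch (not part of this patch): the rewritten igb_add_rx_frag()
 * above copies just the detected header into the skb's linear area (rounded
 * up to a long for memcpy speed) and leaves the payload in the page
 * fragment. The pointer/length bookkeeping in isolation:
 */
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t pull_header(const unsigned char **va, size_t *size, size_t headlen)
{
	size_t copy = ALIGN_UP(headlen, sizeof(long));	/* what memcpy moves */

	*va += headlen;		/* payload now starts after the header */
	*size -= headlen;	/* only headlen counts against the fragment */
	return copy;
}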
- */ - pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * igb_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -6903,10 +6860,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - igb_pull_tail(rx_ring, rx_desc, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -6956,7 +6909,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, skb->protocol = eth_type_trans(skb, rx_ring->netdev); } -static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) +static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { struct igb_ring *rx_ring = q_vector->rx.ring; struct sk_buff *skb = rx_ring->skb; @@ -7030,7 +6983,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); - return total_packets < budget; + return total_packets; } static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, @@ -7475,6 +7428,7 @@ static int igb_resume(struct device *dev) if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + rtnl_unlock(); return -ENOMEM; } @@ -7568,6 +7522,7 @@ static int igb_sriov_reinit(struct pci_dev *dev) igb_init_queue_configuration(adapter); if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } diff --git a/kernel/drivers/net/ethernet/intel/igb/igb_ptp.c b/kernel/drivers/net/ethernet/intel/igb/igb_ptp.c index c3a9392cb..c44df87c3 100644 --- a/kernel/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/kernel/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -143,7 +143,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter, * sub-nanosecond resolution. 
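With igb_clean_rx_irq() changed to return the number of packets cleaned, igb_poll() can hand the exact total to napi_complete_done(). A toy model of that budget contract in plain C, with no kernel APIs; the queue depth and budget are invented:

#include <stdio.h>
#include <stdbool.h>

/* toy queue: pretend 7 packets are pending */
static int pending = 7;

static int clean_rx(int budget)
{
        int cleaned = pending < budget ? pending : budget;
        pending -= cleaned;
        return cleaned;          /* packets processed, never more than budget */
}

static int poll(int budget)
{
        int work_done = clean_rx(budget);
        bool clean_complete = work_done < budget;

        if (!clean_complete)
                return budget;   /* work may remain: stay in polling mode */

        /* all done: report the exact count, as napi_complete_done() expects */
        printf("napi_complete_done(napi, %d)\n", work_done);
        return 0;
}

int main(void)
{
        while (poll(4) == 4)
                ;                /* the kernel would re-invoke poll */
        return 0;
}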
*/ wr32(E1000_SYSTIML, ts->tv_nsec); - wr32(E1000_SYSTIMH, ts->tv_sec); + wr32(E1000_SYSTIMH, (u32)ts->tv_sec); } /** @@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) wr32(E1000_CTRL_EXT, ctrl_ext); } -static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) { static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, @@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, @@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) tssdp &= ~AUX1_TS_SDP_EN; tssdp &= ~ts_sdp_sel_clr[pin]; - if (chan == 1) - tssdp |= ts_sdp_sel_tt1[pin]; - else - tssdp |= ts_sdp_sel_tt0[pin]; - + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } tssdp |= ts_sdp_en[pin]; wr32(E1000_TSSDP, tssdp); @@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); struct e1000_hw *hw = &igb->hw; - u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; unsigned long flags; - struct timespec ts; - int pin = -1; + struct timespec64 ts; + int use_freq = 0, pin = -1; s64 ns; switch (rq->type) { @@ -509,42 +523,60 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, } ts.tv_sec = rq->perout.period.sec; ts.tv_nsec = rq->perout.period.nsec; - ns = timespec_to_ns(&ts); + ns = timespec64_to_ns(&ts); ns = ns >> 1; - if (on && ns < 500000LL) { - /* 2k interrupts per second is an awful lot. 
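The perout path here halves the requested period (one output edge per half-period) and switches short periods to a hardware-clocked output instead of rejecting them outright. A stand-alone sketch of that decision, using the 70 ms and 8 ns bounds visible in the patch; the function name is illustrative:

#include <stdio.h>
#include <stdint.h>

/* decide how an i210-style periodic output would be driven
 * (half-period in nanoseconds) */
static const char *perout_mode(int64_t period_ns)
{
        int64_t half = period_ns >> 1;   /* one output edge per half period */

        if (half <= 70000000LL) {        /* fast: let hardware clock the pin */
                if (half < 8LL)
                        return "rejected (-EINVAL)";
                return "free-running clock output (FREQOUT)";
        }
        return "interrupt-driven target-time toggling";
}

int main(void)
{
        printf("1 ms period: %s\n", perout_mode(1000000LL));
        printf("1 s period:  %s\n", perout_mode(1000000000LL));
        return 0;
}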
*/ - return -EINVAL; + if (on && ns <= 70000000LL) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; } - ts = ns_to_timespec(ns); + ts = ns_to_timespec64(ns); if (rq->perout.index == 1) { - tsauxc_mask = TSAUXC_EN_TT1; - tsim_mask = TSINTR_TT1; + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } trgttiml = E1000_TRGTTIML1; trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; } else { - tsauxc_mask = TSAUXC_EN_TT0; - tsim_mask = TSINTR_TT0; + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } trgttiml = E1000_TRGTTIML0; trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; } spin_lock_irqsave(&igb->tmreg_lock, flags); tsauxc = rd32(E1000_TSAUXC); tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } if (on) { int i = rq->perout.index; - - igb_pin_perout(igb, i, pin); + igb_pin_perout(igb, i, pin, use_freq); igb->perout[i].start.tv_sec = rq->perout.start.sec; igb->perout[i].start.tv_nsec = rq->perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; wr32(trgttimh, rq->perout.start.sec); wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); tsauxc |= tsauxc_mask; tsim |= tsim_mask; - } else { - tsauxc &= ~tsauxc_mask; - tsim &= ~tsim_mask; } wr32(E1000_TSAUXC, tsauxc); wr32(E1000_TSIM, tsim); diff --git a/kernel/drivers/net/ethernet/intel/igbvf/ethtool.c b/kernel/drivers/net/ethernet/intel/igbvf/ethtool.c index c6996feb1..b74ce53d7 100644 --- a/kernel/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/kernel/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -196,8 +196,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = igbvf_get_regs_len(netdev); - drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); } static void igbvf_get_ringparam(struct net_device *netdev, diff --git a/kernel/drivers/net/ethernet/intel/igbvf/netdev.c b/kernel/drivers/net/ethernet/intel/igbvf/netdev.c index 95af14e13..297af801f 100644 --- a/kernel/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/kernel/drivers/net/ethernet/intel/igbvf/netdev.c @@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); + buffer_info->dma = 0; skb_put(skb, hlen); } @@ -1210,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget) /* If not enough Rx work done, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->requested_itr & 3) igbvf_set_itr(adapter); @@ -2614,6 +2615,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_poll_controller = igbvf_netpoll, #endif .ndo_set_features = igbvf_set_features, + .ndo_features_check = passthru_features_check, }; /** diff --git a/kernel/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/kernel/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index b311e9e71..d2b29b490 100644 --- a/kernel/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -479,9 +479,6 @@ 
ixgb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGB_STATS_LEN; - drvinfo->regdump_len = ixgb_get_regs_len(netdev); - drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); } static void diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 636f9e350..1d2174526 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -152,9 +152,17 @@ struct vf_data_storage { u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; + u8 trusted; + int xcast_mode; unsigned int vf_api; }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + struct vf_macvlans { struct list_head l; int vf; @@ -539,8 +547,7 @@ struct hwmon_buff { #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 -#define IXGBE_10K_ITR 400 -#define IXGBE_8K_ITR 500 +#define IXGBE_12K_ITR 336 /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, @@ -595,6 +602,7 @@ struct ixgbe_mac_addr { /* default to trying for four seconds */ #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ /* board specific private data structure */ struct ixgbe_adapter { @@ -630,6 +638,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) #define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) #define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) +#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) @@ -643,6 +652,10 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) +#ifdef CONFIG_IXGBE_VXLAN +#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) +#endif /* Tx fast path data */ int num_tx_queues; @@ -703,6 +716,7 @@ struct ixgbe_adapter { u32 link_speed; bool link_up; + unsigned long sfp_poll_time; unsigned long link_check_timeout; struct timer_list service_timer; @@ -756,7 +770,9 @@ struct ixgbe_adapter { u32 timer_event_accumulator; u32 vferr_refcount; struct ixgbe_mac_addr *mac_table; +#ifdef CONFIG_IXGBE_VXLAN u16 vxlan_port; +#endif struct kobject *info_kobj; #ifdef CONFIG_IXGBE_HWMON struct hwmon_buff *ixgbe_hwmon_buff; @@ -966,4 +982,5 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_reta(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 824a7ab79..65db69b86 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1225,7 +1225,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, - .check_overtemp = &ixgbe_tn_check_overtemp, + .check_overtemp = &ixgbe_tn_check_overtemp, }; struct ixgbe_info ixgbe_82598_info = { @@ -1234,4 
+1234,5 @@ struct ixgbe_info ixgbe_82598_info = { .mac_ops = &mac_ops_82598, .eeprom_ops = &eeprom_ops_82598, .phy_ops = &phy_ops_82598, + .mvals = ixgbe_mvals_8259X, }; diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e0c363948..a39afcf03 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -44,9 +44,8 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); +static void +ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); @@ -71,7 +70,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { u32 fwsm, manc, factps; - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) return false; @@ -79,7 +78,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw) if (!(manc & IXGBE_MANC_RCV_TCO_EN)) return false; - factps = IXGBE_READ_REG(hw, IXGBE_FACTPS); + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (factps & IXGBE_FACTPS_MNGCG) return false; @@ -109,6 +108,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; + mac->ops.set_rate_select_speed = + ixgbe_set_hard_rate_select_speed; } else { if ((mac->ops.get_media_type(hw) == ixgbe_media_type_backplane) && @@ -504,16 +506,12 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) **/ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { - u32 autoc2_reg, fwsm; + u32 autoc2_reg; u16 ee_ctrl_2 = 0; hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); - /* Check to see if MNG FW could be enabled */ - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); - - if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) && - !hw->wol_enabled && + if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; @@ -650,176 +648,32 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** - * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * ixgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set * - * Set the link speed in the AUTOC register and restarts link. 
- **/ -static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) + * Set module link speed via RS0/RS1 rate select pins. + */ +static void +ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { - s32 status = 0; - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - u32 speedcnt = 0; u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - u32 i = 0; - bool link_up = false; - bool autoneg = false; - - /* Mask off requested but non-supported speeds */ - status = hw->mac.ops.get_link_capabilities(hw, &link_speed, - &autoneg); - if (status != 0) - return status; - - speed &= link_speed; - - /* - * Try each speed one by one, highest priority first. We do this in - * software because 10gb fiber doesn't support speed autonegotiation. - */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - break; - case ixgbe_media_type_fiber_qsfp: - /* QSFP module automatically detects MAC link speed */ - break; - default: - hw_dbg(hw, "Unexpected media type.\n"); - break; - } - - /* Allow module to change analog characteristics (1G->10G) */ - msleep(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg_wait_to_complete); - if (status != 0) - return status; - - /* Flap the tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); - - /* - * Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted. 82599 uses the same timing for 10g SFI. 
- */ - for (i = 0; i < 5; i++) { - /* Wait for the link partner to also set speed */ - msleep(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) - return status; - - if (link_up) - goto out; - } - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - speedcnt++; - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - break; - case ixgbe_media_type_fiber_qsfp: - /* QSFP module automatically detects MAC link speed */ - break; - default: - hw_dbg(hw, "Unexpected media type.\n"); - break; - } - - /* Allow module to change analog characteristics (10G->1G) */ - msleep(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg_wait_to_complete); - if (status != 0) - return status; - /* Flap the tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); - - /* Wait for the link partner to also set speed */ - msleep(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if (link_up) - goto out; + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; } - /* - * We didn't get link. Configure back to the highest speed we tried, - * (if there was more than one). We call ourselves back with just the - * single highest speed that the user requested. - */ - if (speedcnt > 1) - status = ixgbe_setup_mac_link_multispeed_fiber(hw, - highest_link_speed, - autoneg_wait_to_complete); - -out: - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - return status; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); } /** @@ -1246,6 +1100,25 @@ mac_reset_top: } /** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return 0; + udelay(10); + } + + return IXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. 
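The new ixgbe_fdir_check_cmd_complete() above centralizes the poll-until-clear idiom that the Flow Director paths below now share. A user-space sketch of the same loop against a simulated register; the mask value, poll count, and return code are placeholders for the real defines:

#include <stdio.h>
#include <stdint.h>

#define FDIRCMD_CMD_MASK 0x3          /* assumed mask layout for the sketch */
#define CMD_POLL         10

/* simulated register: command bits clear after a few reads */
static uint32_t read_fdircmd(void)
{
        static int reads;
        return ++reads < 4 ? 0x1 : 0x0;
}

/* same shape as the new ixgbe_fdir_check_cmd_complete() helper */
static int fdir_check_cmd_complete(uint32_t *fdircmd)
{
        int i;

        for (i = 0; i < CMD_POLL; i++) {
                *fdircmd = read_fdircmd();
                if (!(*fdircmd & FDIRCMD_CMD_MASK))
                        return 0;     /* previous command retired */
                /* real driver: udelay(10) between polls */
        }
        return -1;                    /* IXGBE_ERR_FDIR_CMD_INCOMPLETE */
}

int main(void)
{
        uint32_t cmd;

        printf("poll result: %d (cmd=0x%x)\n", fdir_check_cmd_complete(&cmd), cmd);
        return 0;
}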
* @hw: pointer to hardware structure **/ @@ -1253,6 +1126,8 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + u32 fdircmd; + s32 err; fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; @@ -1260,15 +1135,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) * Before starting reinitialization process, * FDIRCMD.CMD must be zero. */ - for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { - if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - IXGBE_FDIRCMD_CMD_MASK)) - break; - udelay(10); - } - if (i >= IXGBE_FDIRCMD_CMD_POLL) { - hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n"); + return err; } IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); @@ -1394,14 +1264,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor * Initialize the drop queue * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | - IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | @@ -1509,20 +1377,28 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, * @input: unique input dword * @common: compressed common input dword * @queue: queue index to direct traffic to + * + * Note that the tunnel bit in input must not be set when the hardware + * tunneling support does not exist. 
**/ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) { - u64 fdirhashcmd; - u32 fdircmd; + u64 fdirhashcmd; + u8 flow_type; + bool tunnel; + u32 fdircmd; /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ - switch (input.formatted.flow_type) { + tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); + flow_type = input.formatted.flow_type & + (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); + switch (flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_SCTPV4: @@ -1538,8 +1414,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + if (tunnel) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; /* * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits @@ -1746,6 +1624,16 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* also use it for SCTP */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + break; + default: + break; + } + /* store source and destination IP masks (big-enian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); @@ -1760,6 +1648,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id, u8 queue) { u32 fdirport, fdirvlan, fdirhash, fdircmd; + s32 err; /* currently IPv6 is not supported, must be programmed with 0 */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), @@ -1808,6 +1697,11 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + hw_dbg(hw, "Flow Director command did not complete!\n"); + return err; + } return 0; } @@ -1817,9 +1711,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id) { u32 fdirhash; - u32 fdircmd = 0; - u32 retry_count; - s32 err = 0; + u32 fdircmd; + s32 err; /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; @@ -1832,18 +1725,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, /* Query if filter is present */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); - for (retry_count = 10; retry_count; retry_count--) { - /* allow 10us for query to process */ - udelay(10); - /* verify query completed successfully */ - fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); - if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - break; + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + hw_dbg(hw, "Flow Director command did not complete!\n"); + return err; } - if (!retry_count) - err = IXGBE_ERR_FDIR_REINIT_FAILED; - /* if filter exists in hardware then remove it */ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1852,7 +1739,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } - return 
err; + return 0; } /** @@ -2378,4 +2265,5 @@ struct ixgbe_info ixgbe_82599_info = { .eeprom_ops = &eeprom_ops_82599, .phy_ops = &phy_ops_82599, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_8259X, }; diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 06d8f3cfa..ce61b36b9 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -57,6 +57,11 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); +/* Base table for registers values that change by MAC */ +const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(8259X) +}; + /** * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow * control @@ -91,6 +96,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_T3_LOM: case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550EM_X_10G_T: supported = true; break; default: @@ -290,13 +297,13 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) /* Setup flow control */ ret_val = ixgbe_setup_fc(hw); - if (!ret_val) - return 0; + if (ret_val) + return ret_val; /* Clear adapter stopped flag */ hw->adapter_stopped = false; - return ret_val; + return 0; } /** @@ -463,7 +470,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) } } - if (hw->mac.type == ixgbe_mac_X540) { + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); @@ -681,7 +688,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) bus->lan_id = bus->func; /* check for a port swap */ - reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; } @@ -799,7 +806,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) * Check for EEPROM present first. 
* If not present leave as none */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (eec & IXGBE_EEC_PRES) { eeprom->type = ixgbe_eeprom_spi; @@ -1283,14 +1290,14 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) return IXGBE_ERR_SWFW_SYNC; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* Request EEPROM Access */ eec |= IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (eec & IXGBE_EEC_GNT) break; udelay(5); @@ -1299,7 +1306,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) /* Release if grant not acquired */ if (!(eec & IXGBE_EEC_GNT)) { eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); hw_dbg(hw, "Could not acquire EEPROM grant\n"); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); @@ -1309,7 +1316,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) /* Setup EEPROM for Read/Write */ /* Clear CS and SK */ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); return 0; @@ -1333,7 +1340,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) break; usleep_range(50, 100); @@ -1353,7 +1360,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SMBI) { hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); return IXGBE_ERR_EEPROM; @@ -1362,16 +1369,16 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) /* Now get the semaphore between SW/FW through the SWESMBI bit */ for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); /* Set the SW EEPROM semaphore bit to request access */ swsm |= IXGBE_SWSM_SWESMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); /* If we set the bit successfully then we got the * semaphore. 
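The IXGBE_EEC(hw)/IXGBE_SWSM(hw)/IXGBE_FACTPS(hw) conversions running through these hunks replace fixed register offsets with a per-MAC lookup table seeded by IXGBE_MVALS_INIT(). A sketch of that indirection; the index names and offsets below are invented for illustration, not the real ixgbe_type.h values:

#include <stdio.h>
#include <stdint.h>

enum mvals_idx { MVAL_EEC, MVAL_SWSM, MVAL_FACTPS, MVALS_IDX_LIMIT };

static const uint32_t mvals_8259X[MVALS_IDX_LIMIT] = {
        [MVAL_EEC] = 0x10010, [MVAL_SWSM] = 0x10140, [MVAL_FACTPS] = 0x10150,
};
static const uint32_t mvals_X550[MVALS_IDX_LIMIT] = {
        [MVAL_EEC] = 0x10010, [MVAL_SWSM] = 0x10148, [MVAL_FACTPS] = 0x10158,
};

struct hw { const uint32_t *mvals; };

/* what a macro like IXGBE_EEC(hw) expands to conceptually */
static uint32_t reg_eec(const struct hw *hw)
{
        return hw->mvals[MVAL_EEC];
}

int main(void)
{
        struct hw legacy = { mvals_8259X }, x550 = { mvals_X550 };

        printf("EEC offset: 0x%x vs 0x%x\n", reg_eec(&legacy), reg_eec(&x550));
        return 0;
}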
*/ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SWESMBI) break; @@ -1400,11 +1407,11 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) { u32 swsm; - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); IXGBE_WRITE_FLUSH(hw); } @@ -1454,15 +1461,15 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* Toggle CS to flush commands */ eec |= IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); eec &= ~IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } @@ -1480,7 +1487,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u32 mask; u32 i; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* * Mask is used to shift "count" bits of "data" out to the EEPROM @@ -1501,7 +1508,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, else eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); @@ -1518,7 +1525,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, /* We leave the "DI" bit set to "0" when we leave this routine. */ eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); } @@ -1539,7 +1546,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) * the value of the "DO" bit. During this "shifting in" process the * "DI" bit should always be clear. 
*/ - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); @@ -1547,7 +1554,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) data = data << 1; ixgbe_raise_eeprom_clk(hw, &eec); - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec &= ~(IXGBE_EEC_DI); if (eec & IXGBE_EEC_DO) @@ -1571,7 +1578,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) * (setting the SK bit), then delay */ *eec = *eec | IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } @@ -1588,7 +1595,7 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) * delay */ *eec = *eec & ~IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } @@ -1601,19 +1608,19 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec |= IXGBE_EEC_CS; /* Pull CS high */ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); /* Stop requesting EEPROM access */ eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); @@ -2157,10 +2164,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) /* * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark - * to the maximum FCRTH value. This allows the Tx - * switch to function even under heavy Rx workloads. + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. */ - fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); @@ -2469,6 +2477,9 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + if (hw->mac.type >= ixgbe_mac_X550) + return 0; + /* * Before proceeding, make sure that the PCIe block does not have * transactions pending. @@ -3898,3 +3909,228 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) } } } + +/** ixgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + **/ +bool ixgbe_mng_present(struct ixgbe_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type < ixgbe_mac_82599EB) + return false; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + fwsm &= IXGBE_FWSM_MODE_MASK; + return fwsm == IXGBE_FWSM_FW_MODE_PT; +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. 
+ */ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = 0; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + + /* Mask off requested but non-supported speeds */ + status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); + if (status) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.set_rate_select_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msleep(40); + + status = hw->mac.ops.setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status) + return status; + + /* Flap the Tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msleep(40); + + status = hw->mac.ops.setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status) + return status; + + /* Flap the Tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. 
Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the soft rate select. + */ +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); + return; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + return; + } +} diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index f21f8a165..a0044e4a8 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -113,11 +113,14 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); +extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; + #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 #define IXGBE_EMC_INTERNAL_DATA 0x00 #define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 @@ -132,6 +135,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); #define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 3b932fe64..23277ab15 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -259,7 +259,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 
u8 pfc_en, u8 *prio_tc) fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { - reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); } diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index eafa9ec80..d681273bd 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -166,6 +166,8 @@ static int ixgbe_get_settings(struct net_device *netdev, /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->supported |= SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) + ecmd->supported |= SUPPORTED_2500baseX_Full; if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ecmd->supported |= SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) @@ -177,6 +179,8 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + ecmd->advertising |= ADVERTISED_2500baseX_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) ecmd->advertising |= ADVERTISED_1000baseT_Full; } else { @@ -207,6 +211,7 @@ static int ixgbe_get_settings(struct net_device *netdev, switch (adapter->hw.phy.type) { case ixgbe_phy_tn: case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: case ixgbe_phy_cu_unknown: ecmd->supported |= SUPPORTED_TP; ecmd->advertising |= ADVERTISED_TP; @@ -285,6 +290,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_2500); + break; case IXGBE_LINK_SPEED_1GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; @@ -470,16 +478,16 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); /* NVM Register */ - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); - regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw)); regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); - regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw)); /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead @@ -935,9 +943,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGBE_STATS_LEN; - drvinfo->testinfo_len = IXGBE_TEST_LEN; - drvinfo->regdump_len = ixgbe_get_regs_len(netdev); } static void ixgbe_get_ringparam(struct 
net_device *netdev, @@ -2278,7 +2283,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_10K_ITR; + tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; @@ -2594,18 +2599,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fdir_filter *input; union ixgbe_atr_input mask; + u8 queue; int err; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; - /* - * Don't allow programming if the action is a queue greater than - * the number of online Rx queues. + /* ring_cookie is masked into a set of queues and ixgbe pools, or + * we use the drop index. */ - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= adapter->num_rx_queues)) - return -EINVAL; + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf && (ring >= adapter->num_rx_queues)) + return -EINVAL; + else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } /* Don't allow indexes to exist outside of available space */ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { @@ -2683,10 +2705,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* program filters to filter memory */ err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, - (input->action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[input->action]->reg_idx); + &input->filter, input->sw_idx, queue); if (err) goto err_out_w_lock; @@ -2853,6 +2872,14 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 16; + else + return 64; +} + static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -2892,6 +2919,44 @@ static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ixgbe_rss_indir_tbl_max(adapter)); + + /* Allow at least 2 queues w/ SR-IOV. */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. 
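The new ring_cookie handling above splits the cookie into a ring and an optional VF and maps the pair onto an absolute queue. The same decode as stand-alone C, assuming the ethtool convention of the ring in the low 32 bits and the VF id above them; the PF branch really uses the ring's reg_idx, simplified here to the ring number:

#include <stdio.h>
#include <stdint.h>

#define RING_MASK 0xFFFFFFFFULL
#define VF_SHIFT  32

static int cookie_to_queue(uint64_t cookie, unsigned int num_rx_queues,
                           unsigned int queues_per_pool, unsigned int num_vfs)
{
        uint32_t ring = cookie & RING_MASK;
        uint8_t vf = cookie >> VF_SHIFT;

        if (!vf && ring >= num_rx_queues)
                return -1;                       /* -EINVAL in the driver */
        if (vf && (vf > num_vfs || ring >= queues_per_pool))
                return -1;

        /* PF rings use their own index; VF pools are stacked after it */
        return vf ? (vf - 1) * queues_per_pool + ring : ring;
}

int main(void)
{
        /* VF 2, ring 1, with 4 queues per pool -> absolute queue 5 */
        uint64_t cookie = (2ULL << VF_SHIFT) | 1;

        printf("queue = %d\n", cookie_to_queue(cookie, 8, 4, 7));
        return 0;
}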
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + + ixgbe_store_reta(adapter); + + return 0; +} + static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -2923,14 +2988,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: @@ -3053,7 +3110,7 @@ static int ixgbe_get_module_info(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - u32 status; + s32 status; u8 sff8472_rev, addr_mode; bool page_swap = false; @@ -3061,14 +3118,14 @@ static int ixgbe_get_module_info(struct net_device *dev, status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &sff8472_rev); - if (status != 0) + if (status) return -EIO; /* addressing mode is not supported */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &addr_mode); - if (status != 0) + if (status) return -EIO; if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { @@ -3095,7 +3152,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u8 databyte = 0xFF; int i = 0; @@ -3112,7 +3169,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, else status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); - if (status != 0) + if (status) return -EIO; data[i - ee->offset] = databyte; @@ -3152,6 +3209,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_rxfh_indir_size = ixgbe_rss_indir_size, .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, + .set_rxfh = ixgbe_set_rxfh, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 68e1e757e..f3168bcc7 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, if (txr_count && !rxr_count) { /* tx only vector */ if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; + q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 463ff4720..aed8d029b 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. 
+ Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -65,6 +65,9 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" +#ifdef CONFIG_IXGBE_VXLAN +#include <net/vxlan.h> +#endif char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -76,10 +79,12 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.0.1-k" +#define DRV_VERSION "4.2.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2014 Intel Corporation."; + "Copyright (c) 1999-2015 Intel Corporation."; + +static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -131,6 +136,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, /* required last entry */ {0, } }; @@ -240,13 +247,20 @@ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, int expected_gts) { + struct ixgbe_hw *hw = &adapter->hw; int max_gts = 0; enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; struct pci_dev *pdev; - /* determine whether to use the the parent device + /* Some devices are not connected over PCIe and thus do not negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. */ + if (hw->bus.type == ixgbe_bus_type_internal) + return; + + /* determine whether to use the parent device */ if (ixgbe_pcie_from_parent(&adapter->hw)) pdev = adapter->pdev->bus->parent->self; else @@ -1231,9 +1245,12 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, int cpu) { struct ixgbe_hw *hw = &adapter->hw; - u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + u32 txctrl = 0; u16 reg_offset; + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + txctrl = dca3_get_tag(tx_ring->dev, cpu); + switch (hw->mac.type) { case ixgbe_mac_82598EB: reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); @@ -1265,9 +1282,11 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, int cpu) { struct ixgbe_hw *hw = &adapter->hw; - u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); + u32 rxctrl = 0; u8 reg_idx = rx_ring->reg_idx; + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + rxctrl = dca3_get_tag(rx_ring->dev, cpu); switch (hw->mac.type) { case ixgbe_mac_82599EB: @@ -1284,6 +1303,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, * which will cause the DCA tag to be cleared. 
*/ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_DCA_EN | IXGBE_DCA_RXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); @@ -1313,11 +1333,13 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { int i; - if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) - return; - /* always use CB2 mode, difference is masked in the CB driver */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_MODE_CB2); + else + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); for (i = 0; i < adapter->num_q_vectors; i++) { adapter->q_vector[i]->cpu = -1; @@ -1340,7 +1362,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) break; if (dca_add_requester(dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_MODE_CB2); break; } /* Fall Through since DCA is disabled. */ @@ -1348,7 +1371,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { dca_remove_requester(dev); adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); } break; } @@ -1357,14 +1381,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) } #endif /* CONFIG_IXGBE_DCA */ + +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - if (ring->netdev->features & NETIF_F_RXHASH) - skb_set_hash(skb, - le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - PKT_HASH_TYPE_L3); + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } #ifdef IXGBE_FCOE @@ -1411,7 +1452,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) { encap_pkt = true; skb->encapsulation = 1; - skb->ip_summed = CHECKSUM_NONE; } /* if IP and error */ @@ -2232,7 +2272,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, /* simple throttlerate management * 0-10MB/s lowest (100000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? 
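The reworked ixgbe_rx_hash() above reports PKT_HASH_TYPE_L4 only when the descriptor's RSS type falls inside IXGBE_RSS_L4_TYPES_MASK, and leaves the hash unset for type 0. A compact model of that classification; the numeric type codes below are placeholders for the real IXGBE_RXDADV_RSSTYPE_* values:

#include <stdio.h>

/* assumed descriptor RSS type codes for the sketch */
enum { RSS_IPV4 = 1, RSS_IPV4_TCP = 2, RSS_IPV6_TCP = 3, RSS_IPV4_UDP = 7 };

#define RSS_L4_TYPES_MASK \
        ((1ul << RSS_IPV4_TCP) | (1ul << RSS_IPV6_TCP) | (1ul << RSS_IPV4_UDP))

static const char *hash_type(unsigned int rss_type)
{
        if (!rss_type)
                return "none (leave skb hash unset)";
        return (RSS_L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3";
}

int main(void)
{
        printf("ipv4:     %s\n", hash_type(RSS_IPV4));
        printf("ipv4-tcp: %s\n", hash_type(RSS_IPV4_TCP));
        printf("type 0:   %s\n", hash_type(0));
        return 0;
}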
*/ timepassed_us = q_vector->itr >> 2; @@ -2321,7 +2361,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) new_itr = IXGBE_20K_ITR; break; case bulk_latency: - new_itr = IXGBE_8K_ITR; + new_itr = IXGBE_12K_ITR; break; default: break; @@ -2366,7 +2406,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) * - We may have missed the interrupt so always have to * check if we got a LSC */ - if (!(eicr & IXGBE_EICR_GPI_SDP0) && + if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && !(eicr & IXGBE_EICR_LSC)) return; @@ -2386,14 +2426,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) break; default: - if (!(eicr & IXGBE_EICR_GPI_SDP0)) + if (adapter->hw.mac.type >= ixgbe_mac_X540) + return; + if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) return; break; } - e_crit(drv, - "Network adapter has been stopped because it has over heated. " - "Restart the computer. If the problem persists, " - "power off the system and replace the adapter\n"); + e_crit(drv, "%s\n", ixgbe_overheat_msg); adapter->interrupt_event = 0; } @@ -2403,15 +2442,17 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && - (eicr & IXGBE_EICR_GPI_SDP1)) { + (eicr & IXGBE_EICR_GPI_SDP1(hw))) { e_crit(probe, "Fan has stopped, replace the adapter\n"); /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); } } static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) { + struct ixgbe_hw *hw = &adapter->hw; + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) return; @@ -2421,7 +2462,8 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) * Need to check link state so complete overtemp check * on service task */ - if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && + if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || + (eicr & IXGBE_EICR_LSC)) && (!test_bit(__IXGBE_DOWN, &adapter->state))) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2437,28 +2479,56 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } - e_crit(drv, - "Network adapter has been stopped because it has over heated. " - "Restart the computer. 
If the problem persists, " - "power off the system and replace the adapter\n"); + e_crit(drv, "%s\n", ixgbe_overheat_msg); +} + +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->phy.type == ixgbe_phy_nl) + return true; + return false; + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + return true; + default: + return false; + } + default: + return false; + } } static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; + u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); - if (eicr & IXGBE_EICR_GPI_SDP2) { + if (!ixgbe_is_sfp(hw)) + return; + + /* Later MAC's use different SDP */ + if (hw->mac.type >= ixgbe_mac_X540) + eicr_mask = IXGBE_EICR_GPI_SDP0_X540; + + if (eicr & eicr_mask) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; ixgbe_service_event_schedule(adapter); } } - if (eicr & IXGBE_EICR_GPI_SDP1) { + if (adapter->hw.mac.type == ixgbe_mac_82599EB && + (eicr & IXGBE_EICR_GPI_SDP1(hw))) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; ixgbe_service_event_schedule(adapter); @@ -2543,6 +2613,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) { + struct ixgbe_hw *hw = &adapter->hw; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); /* don't reenable LSC while waiting for link */ @@ -2552,7 +2623,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP0; + mask |= IXGBE_EIMS_GPI_SDP0(hw); break; case ixgbe_mac_X540: case ixgbe_mac_X550: @@ -2563,15 +2634,19 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP1(hw); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP1; - mask |= IXGBE_EIMS_GPI_SDP2; + mask |= IXGBE_EIMS_GPI_SDP1(hw); + mask |= IXGBE_EIMS_GPI_SDP2(hw); /* fall through */ case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP) + mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); + if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) + mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; break; @@ -2626,6 +2701,13 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + if (hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X540)) { + adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X540); + } if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= 
IXGBE_FLAG2_RESET_REQUESTED; @@ -2693,7 +2775,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; #ifdef CONFIG_IXGBE_DCA @@ -2714,9 +2796,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - ixgbe_for_each_ring(ring, q_vector->rx) - clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget) < per_ring_budget); + ixgbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + work_done += cleaned; + clean_complete &= (cleaned < per_ring_budget); + } ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ @@ -2724,7 +2810,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -3254,7 +3340,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. */ -static void ixgbe_store_reta(struct ixgbe_adapter *adapter) +void ixgbe_store_reta(struct ixgbe_adapter *adapter) { u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; @@ -3641,14 +3727,20 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), adapter->num_vfs); - /* Ensure LLDP is set for Ethertype Antispoofing if we will be + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be * calling set_ethertype_anti_spoofing for each VF in loop below */ - if (hw->mac.ops.set_ethertype_anti_spoofing) + if (hw->mac.ops.set_ethertype_anti_spoofing) { IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | /* enable filter */ - IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */ - IXGBE_ETH_P_LLDP)); /* LLDP eth type */ + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + } /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { @@ -3718,8 +3810,6 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or @@ -3733,6 +3823,11 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) */ rdrxctl |= IXGBE_RDRXCTL_MVMEN; break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (adapter->num_vfs) + rdrxctl |= IXGBE_RDRXCTL_PSP; + /* fall through for older HW */ case ixgbe_mac_82599EB: case ixgbe_mac_X540: /* Disable RSC for ACK packets */ @@ -4212,6 +4307,21 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) } } +static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); +#ifdef CONFIG_IXGBE_VXLAN + adapter->vxlan_port = 0; +#endif + break; + default: + break; + } 
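The ixgbe_poll() hunk above is the usual conversion to napi_complete_done(): the loop now totals the packets each Rx ring actually cleaned and reports that number to the NAPI core, which feeds interrupt-moderation and busy-poll decisions. Reduced to its essentials:

	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbe_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
						 per_ring_budget);

		work_done += cleaned;
		clean_complete &= (cleaned < per_ring_budget);
	}

	if (!clean_complete)
		return budget;			/* keep polling */
	napi_complete_done(napi, work_done);	/* report real Rx work */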
+} + #ifdef CONFIG_IXGBE_DCB /** * ixgbe_configure_dcb - Configure DCB hardware @@ -4693,6 +4803,12 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) break; } +#ifdef CONFIG_IXGBE_DCA + /* configure DCA */ + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) + ixgbe_setup_dca(adapter); +#endif /* CONFIG_IXGBE_DCA */ + #ifdef IXGBE_FCOE /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); @@ -4703,32 +4819,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_dfwd(adapter); } -static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) -{ - switch (hw->phy.type) { - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_sfp_active_unknown: - case ixgbe_phy_sfp_ftl_active: - case ixgbe_phy_qsfp_passive_unknown: - case ixgbe_phy_qsfp_active_unknown: - case ixgbe_phy_qsfp_intel: - case ixgbe_phy_qsfp_unknown: - /* ixgbe_phy_none is set when no SFP module is present */ - case ixgbe_phy_none: - return true; - case ixgbe_phy_nl: - if (hw->mac.type == ixgbe_mac_82598EB) - return true; - default: - return false; - } -} - /** * ixgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct @@ -4745,6 +4835,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; } /** @@ -4757,7 +4848,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { u32 speed; bool autoneg, link_up = false; - u32 ret = IXGBE_ERR_LINK_SETUP; + int ret = IXGBE_ERR_LINK_SETUP; if (hw->mac.ops.check_link) ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); @@ -4833,10 +4924,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP0_GPIEN; - break; - case ixgbe_mac_X540: - gpie |= IXGBE_EIMS_TS; + gpie |= IXGBE_SDP0_GPIEN_8259X; break; default: break; @@ -4845,11 +4933,17 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) /* Enable fan failure interrupt */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - gpie |= IXGBE_SDP1_GPIEN; + gpie |= IXGBE_SDP1_GPIEN(hw); - if (hw->mac.type == ixgbe_mac_82599EB) { - gpie |= IXGBE_SDP1_GPIEN; - gpie |= IXGBE_SDP2_GPIEN; + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; + break; + case ixgbe_mac_X550EM_x: + gpie |= IXGBE_SDP0_GPIEN_X540; + break; + default: + break; } IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); @@ -4873,6 +4967,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); + if (hw->phy.ops.set_phy_power) + hw->phy.ops.set_phy_power(hw, true); + smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); @@ -4992,6 +5089,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_reset(adapter); + + if (hw->phy.ops.set_phy_power) { + if (!netif_running(adapter->netdev) && !adapter->wol) + hw->phy.ops.set_phy_power(hw, false); + else + hw->phy.ops.set_phy_power(hw, true); + } } /** @@ -5162,11 +5266,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); - 
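The ixgbe_down() removal just below pairs with the ixgbe_configure() hunk above: reprogramming DCA at configure time means the setting is re-established on every bring-up after a reset, and ixgbe_setup_dca() itself now writes IXGBE_DCA_CTRL_DCA_DISABLE when the flag is clear. The configure-time call, as added above:

	#ifdef CONFIG_IXGBE_DCA
		/* runs on every bring-up, so DCA state survives resets */
		if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
			ixgbe_setup_dca(adapter);
	#endif /* CONFIG_IXGBE_DCA */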
-#ifdef CONFIG_IXGBE_DCA - /* since we reset the hardware DCA settings were cleared */ - ixgbe_setup_dca(adapter); -#endif } /** @@ -5212,7 +5311,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->atr_sample_rate = 20; fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); @@ -5238,7 +5336,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82598EB: adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; @@ -5260,7 +5357,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X540: - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; @@ -5269,6 +5366,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif +#ifdef CONFIG_IXGBE_VXLAN + adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; +#endif break; default: break; @@ -5672,6 +5772,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) static int ixgbe_open(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int err, queues; /* disallow open during test */ @@ -5719,16 +5820,19 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); -#if IS_ENABLED(CONFIG_IXGBE_VXLAN) + ixgbe_clear_vxlan_port(adapter); +#ifdef CONFIG_IXGBE_VXLAN vxlan_get_rx_port(netdev); - #endif + return 0; err_set_queues: ixgbe_free_irq(adapter); err_req_irq: ixgbe_free_all_rx_resources(adapter); + if (hw->phy.ops.set_phy_power && !adapter->wol) + hw->phy.ops.set_phy_power(&adapter->hw, false); err_setup_rx: ixgbe_free_all_tx_resources(adapter); err_setup_tx: @@ -5741,7 +5845,15 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) { ixgbe_ptp_suspend(adapter); - ixgbe_down(adapter); + if (adapter->hw.phy.ops.enter_lplu) { + adapter->hw.phy.reset_disable = true; + ixgbe_down(adapter); + adapter->hw.phy.ops.enter_lplu(&adapter->hw); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_down(adapter); + } + ixgbe_free_irq(adapter); ixgbe_free_all_tx_resources(adapter); @@ -5889,6 +6001,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) } *enable_wake = !!wufc; + if (hw->phy.ops.set_phy_power && !*enable_wake) + hw->phy.ops.set_phy_power(hw, false); ixgbe_release_hw_control(adapter); @@ -6305,6 +6419,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) struct net_device *upper; struct list_head *iter; u32 link_speed = adapter->link_speed; + const char *speed_str; bool flow_rx, flow_tx; /* only continue if link was previously down */ @@ -6342,14 +6457,24 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); - e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", - (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? - "10 Gbps" : - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 
- "1 Gbps" : - (link_speed == IXGBE_LINK_SPEED_100_FULL ? - "100 Mbps" : - "unknown speed"))), + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed_str = "10 Gbps"; + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + speed_str = "2.5 Gbps"; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed_str = "1 Gbps"; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; + default: + speed_str = "unknown speed"; + break; + } + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, ((flow_rx && flow_tx) ? "RX/TX" : (flow_rx ? "RX" : (flow_tx ? "TX" : "None")))); @@ -6606,10 +6731,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) return; + if (adapter->sfp_poll_time && + time_after(adapter->sfp_poll_time, jiffies)) + return; /* If not yet time to poll for SFP */ + /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; + adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; + err = hw->phy.ops.identify_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) goto sfp_out; @@ -6718,6 +6849,26 @@ static void ixgbe_service_timer(unsigned long data) ixgbe_service_event_schedule(adapter); } +static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 status; + + if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; + + if (!hw->phy.ops.handle_lasi) + return; + + status = hw->phy.ops.handle_lasi(&adapter->hw); + if (status != IXGBE_ERR_OVERTEMP) + return; + + e_crit(drv, "%s\n", ixgbe_overheat_msg); +} + static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) { if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) @@ -6758,7 +6909,14 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } +#ifdef CONFIG_IXGBE_VXLAN + if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; + vxlan_get_rx_port(adapter->netdev); + } +#endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); + ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); ixgbe_check_overtemp_subtask(adapter); @@ -6853,31 +7011,55 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && !(first->tx_flags & IXGBE_TX_FLAGS_CC)) return; + vlan_macip_lens = skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; } else { u8 l4_hdr = 0; - switch (first->protocol) { - case htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_inner_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } + + /* use first 4 bits to determine IP version */ + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; + l4_hdr = 
network_hdr.ipv4->protocol; break; - case htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; + case 6: + vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; + l4_hdr = network_hdr.ipv6->nexthdr; break; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); + "partial checksum but version=%d\n", + network_hdr.ipv4->version); } - break; } switch (l4_hdr) { case IPPROTO_TCP: type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << IXGBE_ADVTXD_L4LEN_SHIFT; break; case IPPROTO_SCTP: @@ -6903,7 +7085,6 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, } /* vlan_macip_lens: MACLEN, VLAN tag */ - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, @@ -7158,6 +7339,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct ipv6hdr *ipv6; } hdr; struct tcphdr *th; + struct sk_buff *skb; +#ifdef CONFIG_IXGBE_VXLAN + u8 encap = false; +#endif /* CONFIG_IXGBE_VXLAN */ __be16 vlan_id; /* if ring doesn't have a interrupt vector, cannot perform ATR */ @@ -7171,16 +7356,36 @@ static void ixgbe_atr(struct ixgbe_ring *ring, ring->atr_count++; /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(first->skb); + skb = first->skb; + hdr.network = skb_network_header(skb); + if (skb->encapsulation) { +#ifdef CONFIG_IXGBE_VXLAN + struct ixgbe_adapter *adapter = q_vector->adapter; - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) + if (!adapter->vxlan_port) + return; + if (first->protocol != htons(ETH_P_IP) || + hdr.ipv4->version != IPVERSION || + hdr.ipv4->protocol != IPPROTO_UDP) { + return; + } + if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port) + return; + encap = true; + hdr.network = skb_inner_network_header(skb); + th = inner_tcp_hdr(skb); +#else return; - - th = tcp_hdr(first->skb); +#endif /* CONFIG_IXGBE_VXLAN */ + } else { + /* Currently only IPv4/IPv6 with TCP is supported */ + if ((first->protocol != htons(ETH_P_IPV6) || + hdr.ipv6->nexthdr != IPPROTO_TCP) && + (first->protocol != htons(ETH_P_IP) || + hdr.ipv4->protocol != IPPROTO_TCP)) + return; + th = tcp_hdr(skb); + } /* skip this packet since it is invalid or the socket is closing */ if (!th || th->fin) @@ -7229,6 +7434,11 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.ipv6->daddr.s6_addr32[3]; } +#ifdef CONFIG_IXGBE_VXLAN + if (encap) + input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; +#endif /* CONFIG_IXGBE_VXLAN */ + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, input, common, ring->queue_index); @@ -7694,9 +7904,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) bool pools; /* Hardware supports up to 8 traffic classes */ - if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || - (hw->mac.type == ixgbe_mac_82598EB && - tc < MAX_TRAFFIC_CLASS)) + if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) return -EINVAL; pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); @@ -7709,6 +7920,9 @@ int ixgbe_setup_tc(struct 
net_device *dev, u8 tc) */ if (netif_running(dev)) ixgbe_close(dev); + else + ixgbe_reset(adapter); + ixgbe_clear_interrupt_scheme(adapter); #ifdef CONFIG_IXGBE_DCB @@ -7855,12 +8069,23 @@ static int ixgbe_set_features(struct net_device *netdev, need_reset = true; netdev->features = features; + +#ifdef CONFIG_IXGBE_VXLAN + if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + if (features & NETIF_F_RXCSUM) + adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; + else + ixgbe_clear_vxlan_port(adapter); + } +#endif /* CONFIG_IXGBE_VXLAN */ + if (need_reset) ixgbe_do_reset(netdev); return 0; } +#ifdef CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev @@ -7874,17 +8099,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, struct ixgbe_hw *hw = &adapter->hw; u16 new_port = ntohs(port); + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + if (sa_family == AF_INET6) return; - if (adapter->vxlan_port == new_port) { - netdev_info(dev, "Port %d already offloaded\n", new_port); + if (adapter->vxlan_port == new_port) return; - } if (adapter->vxlan_port) { netdev_info(dev, - "Hit Max num of UDP ports, not adding port %d\n", + "Hit Max num of VXLAN ports, not adding port %d\n", new_port); return; } @@ -7903,9 +8129,11 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, __be16 port) { struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; u16 new_port = ntohs(port); + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + if (sa_family == AF_INET6) return; @@ -7915,9 +8143,10 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, return; } - adapter->vxlan_port = 0; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0); + ixgbe_clear_vxlan_port(adapter); + adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } +#endif /* CONFIG_IXGBE_VXLAN */ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@ -8022,7 +8251,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { - u32 status; + int status; __u16 mode; if (nla_type(attr) != IFLA_BRIDGE_MODE) @@ -8052,7 +8281,8 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, - adapter->bridge_mode, 0, 0, nlflags); + adapter->bridge_mode, 0, 0, nlflags, + filter_mask, NULL); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -8091,7 +8321,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) return ERR_PTR(-EBUSY); - fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); + fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); if (!fwd_adapter) return ERR_PTR(-ENOMEM); @@ -8147,6 +8377,21 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } +#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t +ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation) + return features; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + IXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_ALL_CSUM; + + return features; +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = 
ixgbe_close, @@ -8165,6 +8410,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, + .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB @@ -8192,8 +8438,11 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, +#ifdef CONFIG_IXGBE_VXLAN .ndo_add_vxlan_port = ixgbe_add_vxlan_port, .ndo_del_vxlan_port = ixgbe_del_vxlan_port, +#endif /* CONFIG_IXGBE_VXLAN */ + .ndo_features_check = ixgbe_features_check, }; /** @@ -8291,6 +8540,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, break; case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_10G_T: /* check eeprom to see if enabled wol */ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && @@ -8431,10 +8684,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; + hw->mvals = ii->mvals; /* EEPROM */ memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; goto err_ioremap; @@ -8490,8 +8744,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; - if (err == IXGBE_ERR_SFP_NOT_PRESENT && - hw->mac.type == ixgbe_mac_82598EB) { + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); @@ -8548,17 +8801,24 @@ skip_sriov: netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; +#ifdef CONFIG_IXGBE_VXLAN switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; break; default: break; } +#endif /* CONFIG_IXGBE_VXLAN */ #ifdef CONFIG_IXGBE_DCB netdev->dcbnl_ops = &dcbnl_ops; @@ -8645,9 +8905,10 @@ skip_sriov: hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); /* pick up the PCI bus settings for reporting later */ - hw->mac.ops.get_bus_info(hw); if (ixgbe_pcie_from_parent(hw)) ixgbe_get_parent_bus_info(adapter); + else + hw->mac.ops.get_bus_info(hw); /* calculate the expected PCIe bandwidth required for optimal * performance. 
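The probe hunk above copies ii->mvals into hw->mvals, which is what allows register macros to take the hw pointer, as in IXGBE_EEC(hw), IXGBE_FWSM(hw) and the IXGBE_EICR_GPI_SDP*(hw) interrupt bits used throughout this patch: each MAC family supplies a table of values and the macro indexes it by symbolic name. A plausible shape for that indirection, offered as an assumption since the real definitions live in ixgbe_type.h, outside this diff:

	#define IXGBE_BY_MAC(_hw, _name)  ((_hw)->mvals[IXGBE_MVALS_ ## _name])
	#define IXGBE_EEC(_hw)            IXGBE_BY_MAC(_hw, EEC)
	#define IXGBE_EICR_GPI_SDP0(_hw)  IXGBE_BY_MAC(_hw, EICR_GPI_SDP0)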
Note that some older parts will never have enough @@ -8795,7 +9056,8 @@ static void ixgbe_remove(struct pci_dev *pdev) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&pdev->dev); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); } #endif @@ -8806,17 +9068,12 @@ static void ixgbe_remove(struct pci_dev *pdev) /* remove the added san mac */ ixgbe_del_sanmac_netdev(netdev); +#ifdef CONFIG_PCI_IOV + ixgbe_disable_sriov(adapter); +#endif if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); -#ifdef CONFIG_PCI_IOV - /* - * Only disable SR-IOV on unload if the user specified the now - * deprecated max_vfs module parameter. - */ - if (max_vfs) - ixgbe_disable_sriov(adapter); -#endif ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index b1e4703ff..8daa95f74 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 8a2be4441..fb8673d63 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -100,16 +100,17 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) } /** - * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value + * @lock: true if to take and release semaphore * * Returns an error code on error. 
- **/ -s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) + */ +static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 10; @@ -124,7 +125,7 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ~csum; do { - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ @@ -157,13 +158,15 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, if (ixgbe_clock_out_i2c_bit(hw, false)) goto fail; ixgbe_i2c_stop(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); *val = (high_bits << 8) | low_bits; return 0; fail: ixgbe_i2c_bus_clear(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read combined error - Retry.\n"); @@ -175,17 +178,49 @@ fail: } /** - * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + */ +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + */ +s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write + * @lock: true if to take and release semaphore * * Returns an error code on error. 
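Both combined I2C paths build a checksum with ixgbe_ones_comp_byte_add() over the bus address, register bytes and data, then invert it before sending. A standalone sketch of ones-complement addition with the carry folded back in, a guess at the helper's internals that is consistent with how the patch uses it:

	#include <stdio.h>
	#include <stdint.h>

	static uint8_t ones_comp_byte_add(uint8_t a, uint8_t b)
	{
		uint16_t sum = (uint16_t)a + b;

		return (uint8_t)((sum & 0xFF) + (sum >> 8));	/* end-around carry */
	}

	int main(void)
	{
		uint8_t csum = ones_comp_byte_add(0xA0, 0x12);	/* addr, reg high */

		csum = ones_comp_byte_add(csum, 0x34);		/* reg low */
		printf("checksum byte: 0x%02X\n", (uint8_t)~csum);
		return 0;
	}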
- **/ -s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) + */ +static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val, bool lock) { + u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 1; int retry = 0; u8 reg_high; @@ -197,6 +232,8 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); csum = ~csum; do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) @@ -217,10 +254,14 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, if (ixgbe_out_i2c_byte_ack(hw, csum)) goto fail; ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); return 0; fail: ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte write combined error - Retry.\n"); @@ -232,6 +273,36 @@ fail: } /** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + */ +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
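This refactor splits each combined I2C operation into an _int core taking a lock flag, with locked and _unlocked public wrappers. The unlocked variants are for callers that must issue several transfers atomically under a single semaphore grab, along these lines (the calling sequence is illustrative, not taken from the patch):

	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	s32 status;
	u16 val;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	/* two dependent accesses under one semaphore hold */
	status = ixgbe_read_i2c_combined_generic_unlocked(hw, addr, reg, &val);
	if (!status)
		status = ixgbe_write_i2c_combined_generic_unlocked(hw, addr,
								   reg, val | 1);

	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
	return status;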
+ */ +s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * @@ -243,9 +314,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) u16 ext_ability = 0; if (!hw->phy.phy_semaphore_mask) { - hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) & - IXGBE_STATUS_LAN_ID_1; - if (hw->phy.lan_id) + if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; @@ -317,14 +386,14 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) **/ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) { - u32 status; + s32 status; u16 phy_id_high = 0; u16 phy_id_low = 0; status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &phy_id_high); - if (status == 0) { + if (!status) { hw->phy.id = (u32)(phy_id_high << 16); status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, &phy_id_low); @@ -347,6 +416,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; + case X550_PHY_ID: case X540_PHY_ID: phy_type = ixgbe_phy_aq; break; @@ -356,6 +426,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case ATH_PHY_ID: phy_type = ixgbe_phy_nl; break; + case X557_PHY_ID: + phy_type = ixgbe_phy_x550em_ext_t; + break; default: phy_type = ixgbe_phy_unknown; break; @@ -604,12 +677,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; - u32 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; + u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, @@ -733,39 +801,61 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, } /** - * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * ixgbe_get_copper_speeds_supported - Get copper link speed from phy * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value * - * Determines the link capabilities by reading the AUTOC register. + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
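The copper-capabilities rework that follows reads the PHY's speed-ability register once and caches the decoded mask, so later queries avoid the slow MDIO access. The caching idiom, as it appears in the reworked ixgbe_get_copper_link_capabilities_generic():

	s32 status = 0;

	*autoneg = true;
	if (!hw->phy.speeds_supported)		/* first call: MDIO read */
		status = ixgbe_get_copper_speeds_supported(hw);
	*speed = hw->phy.speeds_supported;	/* afterwards: cached mask */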
*/ -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) +static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { - s32 status; u16 speed_ability; - - *speed = 0; - *autoneg = true; + s32 status; status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, &speed_ability); + if (status) + return status; - if (status == 0) { - if (speed_ability & MDIO_SPEED_10G) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_100) - *speed |= IXGBE_LINK_SPEED_100_FULL; + if (speed_ability & MDIO_SPEED_10G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_1000) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_100) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: + break; } - /* Internal PHY does not support 100 Mbps */ - if (hw->mac.type == ixgbe_mac_X550EM_x) - *speed &= ~IXGBE_LINK_SPEED_100_FULL; + return 0; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = 0; + + *autoneg = true; + if (!hw->phy.speeds_supported) + status = ixgbe_get_copper_speeds_supported(hw); + *speed = hw->phy.speeds_supported; return status; } @@ -1081,6 +1171,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) return IXGBE_ERR_SFP_NOT_PRESENT; } + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); @@ -1088,9 +1181,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (status) goto err_read_i2c_eeprom; - /* LAN ID is needed for sfp_type determination */ - hw->mac.ops.set_lan_id(hw); - if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -1140,7 +1230,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type = ixgbe_sfp_type_lr; else hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } else if (hw->mac.type == ixgbe_mac_82599EB) { + } else { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = @@ -1348,6 +1438,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) return IXGBE_ERR_SFP_NOT_PRESENT; } + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); @@ -1361,9 +1454,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) hw->phy.id = identifier; - /* LAN ID is needed for sfp_type determination */ - hw->mac.ops.set_lan_id(hw); - status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); @@ -1641,26 +1731,46 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * ixgbe_is_sfp_probe - 
Returns true if SFP is being detected + * @hw: pointer to hardware structure + * @offset: eeprom offset to be read + * @addr: I2C address to be read + */ +static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +{ + if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && + offset == IXGBE_SFF_IDENTIFIER && + hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return true; + return false; +} + +/** + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @data: value read + * @lock: true if to take and release semaphore * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. - **/ -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) + */ +static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) { s32 status; u32 max_retry = 10; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + + if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) + max_retry = IXGBE_SFP_DETECT_RETRIES; + *data = 0; do { - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); @@ -1702,12 +1812,16 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, goto fail; ixgbe_i2c_stop(hw); - break; + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; fail: ixgbe_i2c_bus_clear(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(100); + if (lock) { + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(100); + } retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read error - Retrying.\n"); @@ -1716,29 +1830,60 @@ fail: } while (retry < max_retry); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return status; } /** - * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @data: value to write + * @lock: true if to take and release semaphore * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. 
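ixgbe_is_sfp_probe() above recognizes the probe-for-presence case, reading the SFF identifier while sfp_type is still ixgbe_sfp_type_not_present, so the byte-read core can cap retries at IXGBE_SFP_DETECT_RETRIES (2) instead of 10. An empty cage then fails fast rather than sitting through ten retry-and-sleep cycles. The cap as applied in the read path:

	u32 max_retry = 10;

	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
		max_retry = IXGBE_SFP_DETECT_RETRIES;	/* no module: fail fast */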
- **/ -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) + */ +static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) { s32 status; u32 max_retry = 1; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; do { @@ -1769,7 +1914,9 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, goto fail; ixgbe_i2c_stop(hw); - break; + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; fail: ixgbe_i2c_bus_clear(hw); @@ -1780,20 +1927,56 @@ fail: hw_dbg(hw, "I2C byte write error.\n"); } while (retry < max_retry); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); return status; } /** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** * ixgbe_i2c_start - Sets I2C start condition * @hw: pointer to hardware structure * * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + + i2cctl |= IXGBE_I2C_BB_EN(hw); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -1819,10 +2002,15 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
**/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN(hw); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); @@ -1835,6 +2023,13 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) /* bus free time between stop and start (4.7us)*/ udelay(IXGBE_I2C_T_BUF); + + if (bb_en_bit || data_oe_bit || clk_oe_bit) { + i2cctl &= ~bb_en_bit; + i2cctl |= data_oe_bit | clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } } /** @@ -1849,6 +2044,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) s32 i; bool bit = false; + *data = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); *data |= bit << i; @@ -1880,9 +2076,10 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -1896,22 +2093,28 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) **/ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); s32 status = 0; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 timeout = 10; bool ack = true; + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } ixgbe_raise_i2c_clk(hw, &i2cctl); - /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); /* Poll for ACK. 
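The X550 additions in ixgbe_get_i2c_ack() above, and in ixgbe_clock_in_i2c_bit() below, follow one rule: before sampling SDA, set the active-low output enable (IXGBE_I2C_DATA_OE_N_EN) so the MAC releases the line and the slave can drive it. On older MACs the macro yields 0 and the step is skipped. The release-before-read sequence, using the names from the patch:

	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	if (data_oe_bit) {			/* X550: release SDA */
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}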
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ack = ixgbe_get_i2c_data(hw, &i2cctl); udelay(1); @@ -1941,14 +2144,21 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -1969,7 +2179,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { @@ -1997,22 +2207,29 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. **/ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; + if (clk_oe_bit) { + *i2cctl |= clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + } + for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN(hw)) break; } } @@ -2023,13 +2240,15 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. **/ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw); + *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ @@ -2043,22 +2262,34 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. 
**/ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + *i2cctl |= IXGBE_I2C_DATA_OUT(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw); + *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); + *i2cctl &= ~data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + if (!data) /* Can't verify data in this case */ + return 0; + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); return IXGBE_ERR_I2C; } @@ -2073,10 +2304,20 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. **/ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + udelay(IXGBE_I2C_T_FALL); + } + + if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) return true; return false; } @@ -2090,10 +2331,11 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) **/ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl; u32 i; ixgbe_i2c_start(hw); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -2137,3 +2379,36 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) return IXGBE_ERR_OVERTEMP; } + +/** ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + **/ +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +{ + u32 status; + u16 reg; + + /* Bail if we don't have copper phy */ + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + if (on) { + reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } else { + if (ixgbe_check_reset_blocked(hw)) + return 0; + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 434643881..5abd66c84 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -66,6 +66,9 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
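ixgbe_set_copper_phy_power() above is reached through hw->phy.ops.set_phy_power at the call sites added earlier in this patch: power down when the interface is closed and WoL is off, power up in ixgbe_up_complete() and on reset. Note the helper itself declines to power down while ixgbe_check_reset_blocked() indicates firmware controls the PHY. The reset-time policy, condensed from the ixgbe_reset() hunk above:

	if (hw->phy.ops.set_phy_power) {
		if (!netif_running(adapter->netdev) && !adapter->wol)
			hw->phy.ops.set_phy_power(hw, false);
		else
			hw->phy.ops.set_phy_power(hw, true);
	}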
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 @@ -78,9 +81,29 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 #define IXGBE_CS4227 0xBE /* CS4227 address */ -#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 #define IXGBE_CS4227_EDC_MODE_CX1 0x0002 #define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander addr */ +#define IXGBE_PE_OUTPUT 1 /* Output reg offset */ +#define IXGBE_PE_CONFIG 3 /* Config reg offset */ +#define IXGBE_PE_BIT1 (1 << 1) /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 @@ -109,6 +132,8 @@ #define IXGBE_I2C_T_SU_STO 4 #define IXGBE_I2C_T_BUF 5 +#define IXGBE_SFP_DETECT_RETRIES 2 + #define IXGBE_TN_LASI_STATUS_REG 0x9005 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 @@ -145,6 +170,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, @@ -153,8 +179,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, @@ -163,6 +193,10 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val); s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val); #endif /* _IXGBE_PHY_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 1d17b5872..fcd8b27a0 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct 
ixgbe_adapter *adapter) * we want to disable the querying by default. */ adapter->vfinfo[i].rss_query_enabled = 0; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } return 0; @@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + enable = 0; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; + break; + case IXGBEVF_XCAST_MODE_ALLMULTI: + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr &= ~disable; + vmolr |= enable; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; + case IXGBE_VF_UPDATE_XCAST_MODE: + retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); } +static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); +} + void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, return 0; } +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; return 0; } diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2c197e6d1..dad925706 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index dd6ba5916..995f03107 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -91,14 +91,24 @@ #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_CAT(r, m) IXGBE_##r##_##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)]) + /* General Registers */ #define IXGBE_CTRL 0x00000 #define IXGBE_STATUS 0x00008 #define IXGBE_CTRL_EXT 0x00018 #define IXGBE_ESDP 0x00020 #define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ - 0x15F5C : 0x00028)) + +#define IXGBE_I2CCTL_8259X 0x00028 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL) + #define IXGBE_LEDCTL 0x00200 #define IXGBE_FRTIMER 0x00048 #define IXGBE_TCPTIMER 0x0004C @@ -106,17 +116,39 @@ #define IXGBE_EXVET 0x05078 /* NVM Registers */ -#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_8259X 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC_8259X +#define IXGBE_EEC_X550 IXGBE_EEC_8259X +#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X +#define IXGBE_EEC_X550EM_a 0x15FF8 +#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC) #define IXGBE_EERD 0x10014 #define IXGBE_EEWR 0x10018 -#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_8259X 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA_8259X +#define IXGBE_FLA_X550 IXGBE_FLA_8259X +#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X +#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) #define IXGBE_EEMNGCTL 0x10110 #define IXGBE_EEMNGDATA 0x10114 #define IXGBE_FLMNGCTL 0x10118 #define IXGBE_FLMNGDATA 0x1011C #define IXGBE_FLMNGCNT 0x10120 #define IXGBE_FLOP 0x1013C -#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_8259X 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC_8259X +#define IXGBE_GRC_X550 IXGBE_GRC_8259X +#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X +#define IXGBE_GRC_X550EM_a 0x15F64 +#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL_8259X 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL) /* General Receive Control */ #define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ @@ -126,14 +158,55 @@ #define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ - 0x00004000 : 0x00000001) -#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ - 0x00000200 : 0x00000002) -#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ - 0x00001000 : 0x00000004) -#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? 
\ - 0x00000400 : 0x00000008) +#define IXGBE_I2C_CLK_IN_8259X 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT_8259X 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN_8259X 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) + +#define IXGBE_I2C_DATA_OUT_8259X 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN_8259X 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN_8259X 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN_8259X 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) + #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 @@ -329,6 +402,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FDIRSIP4M 0x0EE40 #define IXGBE_FDIRTCPM 0x0EE44 #define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 #define IXGBE_FDIRIP6M 0x0EE74 #define IXGBE_FDIRM 0x0EE70 @@ -775,6 +849,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ @@ -783,6 +858,24 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define 
IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -835,15 +928,36 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_GSCN_1 0x11024 #define IXGBE_GSCN_2 0x11028 #define IXGBE_GSCN_3 0x1102C -#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_8259X 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550EM_a 0x15FEC +#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS) + #define IXGBE_PCIEANACTL 0x11040 -#define IXGBE_SWSM 0x10140 -#define IXGBE_FWSM 0x10148 +#define IXGBE_SWSM_8259X 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550EM_a 0x15F70 +#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM) +#define IXGBE_FWSM_8259X 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550EM_a 0x15F74 +#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM) #define IXGBE_GSSR 0x10160 #define IXGBE_MREVID 0x11064 #define IXGBE_DCA_ID 0x11070 #define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 +#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) /* PCIe registers 82599-specific */ #define IXGBE_GCR_EXT 0x11050 @@ -855,14 +969,21 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_PHYDAT_82599 0x11044 #define IXGBE_PHYCTL_82599 0x11048 #define IXGBE_PBACLR_82599 0x11068 -#define IXGBE_CIAA_82599 0x11088 -#define IXGBE_CIAD_82599 0x1108C -#define IXGBE_CIAA_X550 0x11508 -#define IXGBE_CIAD_X550 0x11510 -#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ - IXGBE_CIAA_X550 : IXGBE_CIAA_82599)) -#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ - IXGBE_CIAD_X550 : IXGBE_CIAD_82599)) + +#define IXGBE_CIAA_8259X 0x11088 +#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 +#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA) + +#define IXGBE_CIAD_8259X 0x1108C +#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 +#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD) + #define IXGBE_PICAUSE 0x110B0 #define IXGBE_PIENA 0x110B8 #define IXGBE_CDQ_MBR_82599 0x110B4 @@ -1072,6 +1193,7 @@ struct ixgbe_thermal_sensor_data { /* RDRXCTL Bit Masks */ #define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ #define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */ #define IXGBE_RDRXCTL_MVMEN 0x00000020 #define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ #define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ @@ -1204,18 +1326,39 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ #define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ #define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ #define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */ #define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ #define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ #define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +/* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define 
IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ #define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ #define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ @@ -1233,6 +1376,8 @@ struct ixgbe_thermal_sensor_data { #define TN1010_PHY_ID 0x00A19410 #define TNX_FW_REV 0xB #define X540_PHY_ID 0x01540200 +#define X550_PHY_ID 0x01540220 +#define X557_PHY_ID 0x01540240 #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 #define AQ_FW_REV 0x20 @@ -1253,9 +1398,25 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_CONTROL_SOL_NL 0x0000 /* General purpose Interrupt Enable */ -#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ -#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ -#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ @@ -1417,9 +1578,25 @@ enum { #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ #define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ -#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ -#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define 
IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + #define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ @@ -1435,9 +1612,9 @@ enum { #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) #define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ @@ -1454,9 +1631,9 @@ enum { #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ #define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) #define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ @@ -1472,9 +1649,9 @@ enum { #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) #define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ @@ -1575,6 +1752,9 @@ enum { * FCoE (0x8906): Filter 2 * 1588 (0x88f7): Filter 3 * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 */ #define IXGBE_ETQF_FILTER_EAPOL 0 #define IXGBE_ETQF_FILTER_FCOE 2 @@ -1582,6 +1762,7 @@ enum { #define IXGBE_ETQF_FILTER_FIP 4 #define IXGBE_ETQF_FILTER_LLDP 5 #define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ @@ -1773,6 +1954,7 @@ enum { #define IXGBE_GSSR_SW_MNG_SM 0x0400 #define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ #define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF /* FW Status register bitmask */ #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ @@ -1887,6 +2069,11 @@ enum { #define 
IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ #define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + #define IXGBE_EEPROM_PAGE_SIZE_MAX 128 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ @@ -2386,9 +2573,11 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 #define IXGBE_FDIR_INIT_DONE_POLL 10 #define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 #define IXGBE_FDIR_DROP_QUEUE 127 @@ -2679,12 +2868,13 @@ typedef u32 ixgbe_link_speed; #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 /* Software ATR input stream values and masks */ -#define IXGBE_ATR_HASH_MASK 0x7fff -#define IXGBE_ATR_L4TYPE_MASK 0x3 -#define IXGBE_ATR_L4TYPE_UDP 0x1 -#define IXGBE_ATR_L4TYPE_TCP 0x2 -#define IXGBE_ATR_L4TYPE_SCTP 0x3 -#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 enum ixgbe_atr_flow_type { IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, @@ -2741,6 +2931,37 @@ union ixgbe_atr_hash_dword { __be32 dword; }; +#define IXGBE_MVALS_INIT(m) \ + IXGBE_CAT(EEC, m), \ + IXGBE_CAT(FLA, m), \ + IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ + IXGBE_CAT(FACTPS, m), \ + IXGBE_CAT(SWSM, m), \ + IXGBE_CAT(SWFW_SYNC, m), \ + IXGBE_CAT(FWSM, m), \ + IXGBE_CAT(SDP0_GPIEN, m), \ + IXGBE_CAT(SDP1_GPIEN, m), \ + IXGBE_CAT(SDP2_GPIEN, m), \ + IXGBE_CAT(EICR_GPI_SDP0, m), \ + IXGBE_CAT(EICR_GPI_SDP1, m), \ + IXGBE_CAT(EICR_GPI_SDP2, m), \ + IXGBE_CAT(CIAA, m), \ + IXGBE_CAT(CIAD, m), \ + IXGBE_CAT(I2C_CLK_IN, m), \ + IXGBE_CAT(I2C_CLK_OUT, m), \ + IXGBE_CAT(I2C_DATA_IN, m), \ + IXGBE_CAT(I2C_DATA_OUT, m), \ + IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ + IXGBE_CAT(I2C_BB_EN, m), \ + IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ + IXGBE_CAT(I2CCTL, m) + +enum ixgbe_mvals { + IXGBE_MVALS_INIT(IDX), + IXGBE_MVALS_IDX_LIMIT +}; + enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi, @@ -2850,9 +3071,8 @@ enum ixgbe_smart_speed { /* PCI bus types */ enum ixgbe_bus_type { ixgbe_bus_type_unknown = 0, - ixgbe_bus_type_pci, - ixgbe_bus_type_pcix, ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, ixgbe_bus_type_reserved }; @@ -3042,9 +3262,11 @@ struct ixgbe_mac_operations { void (*flap_tx_laser)(struct ixgbe_hw *); void (*stop_link_on_d3)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); /* Packet Buffer Manipulation */ void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); @@ -3112,6 +3334,13 @@ struct ixgbe_phy_operations { s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 
reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *value); + s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 value); }; struct ixgbe_eeprom_info { @@ -3121,6 +3350,7 @@ struct ixgbe_eeprom_info { u16 word_size; u16 address_bits; u16 word_page_size; + u16 ctrl_word_3; }; #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 @@ -3164,15 +3394,16 @@ struct ixgbe_phy_info { bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; - u8 lan_id; u32 phy_semaphore_mask; bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; bool reset_if_overtemp; bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; }; #include "ixgbe_mbx.h" @@ -3216,6 +3447,7 @@ struct ixgbe_hw { struct ixgbe_eeprom_info eeprom; struct ixgbe_bus_info bus; struct ixgbe_mbx_info mbx; + const u32 *mvals; u16 device_id; u16 vendor_id; u16 subsystem_device_id; @@ -3234,6 +3466,7 @@ struct ixgbe_info { struct ixgbe_eeprom_operations *eeprom_ops; struct ixgbe_phy_operations *phy_ops; struct ixgbe_mbx_operations *mbx_ops; + const u32 *mvals; }; @@ -3270,16 +3503,21 @@ struct ixgbe_info { #define IXGBE_ERR_PBA_SECTION -31 #define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF -#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) -#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) -#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) -#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) -#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) -#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) -#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00)) +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ BIT(5) +#define IXGBE_FUSES0_REV1 BIT(6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) @@ -3339,4 +3577,6 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 #define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 +#define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) #endif /* _IXGBE_TYPE_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index f5f948d08..c1d4584f6 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -54,6 +54,11 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* set_phy_power was set by default to NULL */ + if (!ixgbe_mng_present(hw)) + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; @@ -202,7 +207,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + @@ -504,8 +509,8 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) return status; } - flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); status = ixgbe_poll_flash_update_done_X540(hw); if (status == 0) @@ -514,11 +519,11 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) hw_dbg(hw, "Flash update time out\n"); if (hw->revision_id == 0) { - flup = IXGBE_READ_REG(hw, IXGBE_EEC); + flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (flup & IXGBE_EEC_SEC1VAL) { flup |= IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); } status = ixgbe_poll_flash_update_done_X540(hw); @@ -544,7 +549,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) u32 reg; for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_EEC); + reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (reg & IXGBE_EEC_FLUDONE) return 0; udelay(5); @@ -562,62 +567,85 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) **/ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 5; - u32 hwmask = 0; + u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; + u32 fwmask = swmask << 5; u32 timeout = 200; + u32 hwmask = 0; + u32 swfw_sync; u32 i; - if (swmask == IXGBE_GSSR_EEP_SM) + if (swmask & IXGBE_GSSR_EEP_SM) hwmask = IXGBE_GSSR_FLASH_SM; + /* SW only mask does not have FW bit pair */ + if (mask & IXGBE_GSSR_SW_MNG_SM) + swmask |= IXGBE_GSSR_SW_MNG_SM; + + swmask |= swi2c_mask; + fwmask |= swi2c_mask << 2; for (i = 0; i < timeout; i++) { - /* - * SW NVM semaphore bit is used for access to all + /* SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_swfw_sync_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) { 
swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - break; - } else { - /* - * Firmware currently using resource (fwmask), - * hardware currently using resource (hwmask), - * or other software thread currently using - * resource (swmask) - */ + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); + usleep_range(5000, 6000); + return 0; } + /* Firmware currently using resource (fwmask), hardware + * currently using resource (hwmask), or other software + * thread currently using resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); } - /* - * If the resource is not released by the FW/HW the SW can assume that - * the FW/HW malfunctions. In that case the SW should sets the - * SW bit(s) of the requested resource(s) while ignoring the - * corresponding FW/HW bits in the SW_FW_SYNC register. - */ - if (i >= timeout) { - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); - if (swfw_sync & (fwmask | hwmask)) { - if (ixgbe_get_swfw_sync_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; + /* Failed to get SW only semaphore */ + if (swmask == IXGBE_GSSR_SW_MNG_SM) { + hw_dbg(hw, "Failed to get SW only semaphore\n"); + return IXGBE_ERR_SWFW_SYNC; + } - swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - } + /* If the resource is not released by the FW/HW the SW can assume that + * the FW/HW malfunctions. In that case the SW should set the SW bit(s) + * of the requested resource(s) while ignoring the corresponding FW/HW + * bits in the SW_FW_SYNC register. + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + if (swfw_sync & (fwmask | hwmask)) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 6000); + return 0; + } + /* If the resource is not released by other SW the SW can assume that + * the other SW malfunctions. In that case the SW should clear all SW + * flags that it does not own and then repeat the whole process once + * again. 
+ */ + if (swfw_sync & swmask) { + u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + + if (swi2c_mask) + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_release_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_ERR_SWFW_SYNC; } + ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); - return 0; + return IXGBE_ERR_SWFW_SYNC; } /** @@ -630,17 +658,19 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) **/ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { + u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); u32 swfw_sync; - u32 swmask = mask; + if (mask & IXGBE_GSSR_I2C_MASK) + swmask |= mask & IXGBE_GSSR_I2C_MASK; ixgbe_get_swfw_sync_semaphore(hw); - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); swfw_sync &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); + usleep_range(5000, 6000); } /** @@ -660,7 +690,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) /* If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) break; usleep_range(50, 100); @@ -674,13 +704,18 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) /* Now get the semaphore between SW/FW through the REGSMP bit */ for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swsm & IXGBE_SWFW_REGSMP)) return 0; usleep_range(50, 100); } + /* Release semaphores and return error if SW NVM semaphore + * was not granted because we do not have access to the EEPROM + */ + hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); + ixgbe_release_swfw_sync_semaphore(hw); return IXGBE_ERR_EEPROM; } @@ -696,13 +731,13 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); - - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); swsm &= ~IXGBE_SWFW_REGSMP; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); IXGBE_WRITE_FLUSH(hw); } @@ -850,9 +885,14 @@ static struct ixgbe_phy_operations phy_ops_X540 = { .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, + .set_phy_power = &ixgbe_set_copper_phy_power, .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, }; +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X540) +}; + struct ixgbe_info ixgbe_X540_info = { .mac = ixgbe_mac_X540, .get_invariants = &ixgbe_get_invariants_X540, @@ -860,4 +900,5 @@ struct ixgbe_info ixgbe_X540_info = { .eeprom_ops = &eeprom_ops_X540, .phy_ops = &phy_ops_X540, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X540, }; diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 
cf5cf819a..ebe0ac950 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -26,6 +26,322 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so simular */ + ixgbe_get_invariants_X540(hw); + + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + return 0; +} + +/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + */ +static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, + value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write to register + * + * Returns status code + */ +static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, + value); +} + +/** + * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register + * @hw: pointer to hardware structure + * @reg: the register to check + * + * Performs a diagnostic on a register in the CS4227 chip. Returns an error + * if it is not operating correctly. + * This function assumes that the caller has acquired the proper semaphore. + */ +static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg) +{ + s32 status; + u32 retry; + u16 reg_val; + + reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1; + status = ixgbe_write_cs4227(hw, reg, reg_val); + if (status) + return status; + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + msleep(IXGBE_CS4227_CHECK_DELAY); + reg_val = 0xFFFF; + ixgbe_read_cs4227(hw, reg, &reg_val); + if (!reg_val) + break; + } + if (reg_val) { + hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg); + return status; + } + + return 0; +} + +/** + * ixgbe_get_cs4227_status - Return CS4227 status + * @hw: pointer to hardware structure + * + * Performs a diagnostic on the CS4227 chip. Returns an error if it is + * not operating correctly. + * This function assumes that the caller has acquired the proper semaphore. + */ +static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw) +{ + s32 status; + u16 value = 0; + + /* Exit if the diagnostic has already been performed.
*/ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + if (status) + return status; + if (value == IXGBE_CS4227_RESET_COMPLETE) + return 0; + + /* Check port 0. */ + status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB); + if (status) + return status; + + status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB); + if (status) + return status; + + /* Check port 1. */ + status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB + + (1 << 12)); + if (status) + return status; + + return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB + + (1 << 12)); +} + +/** + * ixgbe_read_pe - Read register from port expander + * @hw: pointer to hardware structure + * @reg: register number to read + * @value: pointer to receive read value + * + * Returns status code + */ +static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +{ + s32 status; + + status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); + if (status) + hw_err(hw, "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_write_pe - Write register to port expander + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write + * + * Returns status code + */ +static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +{ + s32 status; + + status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, + value); + if (status) + hw_err(hw, "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_reset_cs4227 - Reset CS4227 using port expander + * @hw: pointer to hardware structure + * + * This function assumes that the caller has acquired the proper semaphore. + * Returns error code + */ +static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +{ + s32 status; + u32 retry; + u16 value; + u8 reg; + + /* Trigger hard reset. */ + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg); + if (status) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); + if (status) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100); + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + /* Wait for the reset to complete.
*/ + msleep(IXGBE_CS4227_RESET_DELAY); + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, + &value); + if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK) + break; + msleep(IXGBE_CS4227_CHECK_DELAY); + } + if (retry == IXGBE_CS4227_RETRIES) { + hw_err(hw, "CS4227 reset did not complete\n"); + return IXGBE_ERR_PHY; + } + + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); + if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { + hw_err(hw, "CS4227 EEPROM did not load successfully\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** + * ixgbe_check_cs4227 - Check CS4227 and reset as needed + * @hw: pointer to hardware structure + */ +static void ixgbe_check_cs4227(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + s32 status; + u16 value; + u8 retry; + + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + msleep(IXGBE_CS4227_CHECK_DELAY); + continue; + } + + /* Get status of reset flow. */ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + if (!status && value == IXGBE_CS4227_RESET_COMPLETE) + goto out; + + if (status || value != IXGBE_CS4227_RESET_PENDING) + break; + + /* Reset is pending. Wait and check again. */ + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(IXGBE_CS4227_CHECK_DELAY); + } + /* If still pending, assume other instance failed. */ + if (retry == IXGBE_CS4227_RETRIES) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + return; + } + } + + /* Reset the CS4227. */ + status = ixgbe_reset_cs4227(hw); + if (status) { + hw_err(hw, "CS4227 reset failed: %d", status); + goto out; + } + + /* Reset takes so long, temporarily release semaphore in case the + * other driver instance is waiting for the reset indication. + */ + ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_PENDING); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(10000, 12000); + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d", status); + return; + } + + /* Is the CS4227 working correctly? */ + status = ixgbe_get_cs4227_status(hw); + if (status) { + hw_err(hw, "CS4227 status failed: %d", status); + goto out; + } + + /* Record completion for next time. 
*/ + status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_COMPLETE); + +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(hw->eeprom.semaphore_delay); +} + /** ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * @@ -33,19 +349,12 @@ */ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; - if (hw->bus.lan_id) { - esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); - esdp |= IXGBE_ESDP_SDP1_DIR; - } - esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - + ixgbe_setup_mux_ctl(hw); + ixgbe_check_cs4227(hw); return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; @@ -90,7 +399,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + @@ -103,6 +412,39 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_iosf_wait - Wait for IOSF command completion + * @hw: pointer to hardware structure + * @ctrl: pointer to location to receive final IOSF control value + * + * Return: failing status on timeout + * + * Note: ctrl can be NULL if the IOSF control register value is not needed + */ +static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +{ + u32 i, command; + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete. + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) + break; + usleep_range(10, 20); + } + if (ctrl) + *ctrl = command; + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "IOSF wait timed out\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + /** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the * IOSF device * @hw: pointer to hardware structure @@ -113,7 +455,17 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 *data) { - u32 i, command, error; + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); + if (ret) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret) + goto out; command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); @@ -121,17 +473,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, /* Write IOSF control register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); - /* Check every 10 usec to see if the address cycle completed. 
- * The SB IOSF BUSY bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usleep_range(10, 20); - - command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); - if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) - break; - } + ret = ixgbe_iosf_wait(hw, &command); if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> @@ -140,14 +482,12 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, return IXGBE_ERR_PHY; } - if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { - hw_dbg(hw, "Read timed out\n"); - return IXGBE_ERR_PHY; - } - - *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + if (!ret) + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); - return 0; +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return ret; } /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface @@ -557,6 +897,24 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_get_bus_info_X550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. + **/ +static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + hw->bus.type = ixgbe_bus_type_internal; + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return 0; +} + /** ixgbe_disable_rx_x550 - Disable RX unit * * Enables the Rx DMA unit for x550 @@ -673,111 +1031,6 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, return status; } -/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers - * @hw: pointer to hardware structure - **/ -static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - /* CS4227 does not support autoneg, so disable the laser control - * functions for SFP+ fiber - */ - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { - mac->ops.disable_tx_laser = NULL; - mac->ops.enable_tx_laser = NULL; - mac->ops.flap_tx_laser = NULL; - } -} - -/** ixgbe_setup_sfp_modules_X550em - Setup SFP module - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) -{ - bool setup_linear; - u16 reg_slice, edc_mode; - s32 ret_val; - - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_unknown: - return 0; - case ixgbe_sfp_type_not_present: - return IXGBE_ERR_SFP_NOT_PRESENT; - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - setup_linear = true; - break; - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_da_act_lmt_core0: - case ixgbe_sfp_type_da_act_lmt_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - setup_linear = false; - break; - default: - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - - ixgbe_init_mac_link_ops_X550em(hw); - hw->phy.ops.reset = NULL; - - /* The CS4227 slice address is the base address + the port-pair reg - * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. - */ - reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); - - if (setup_linear) - edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - - /* Configure CS4227 for connection type. 
*/ - ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, - edc_mode); - - if (ret_val) - ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, - edc_mode); - - return ret_val; -} - -/** ixgbe_get_link_capabilities_x550em - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled - **/ -static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - /* SFP */ - if (hw->phy.media_type == ixgbe_media_type_fiber) { - /* CS4227 SFP must not enable auto-negotiation */ - *autoneg = false; - - if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - return 0; - } - - /* Link capabilities are based on SFP */ - if (hw->phy.multispeed_fiber) - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; - } else { - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - } - return 0; -} - /** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the * IOSF device * @@ -789,7 +1042,17 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 data) { - u32 i, command, error; + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); + if (ret) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret) + goto out; command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); @@ -800,17 +1063,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, /* Write IOSF data register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); - /* Check every 10 usec to see if the address cycle completed. - * The SB IOSF BUSY bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usleep_range(10, 20); - - command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); - if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) - break; - } + ret = ixgbe_iosf_wait(hw, &command); if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> @@ -819,12 +1072,9 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, return IXGBE_ERR_PHY; } - if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { - hw_dbg(hw, "Write timed out\n"); - return IXGBE_ERR_PHY; - } - - return 0; +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return ret; } /** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. 
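Both IOSF accessors now funnel their polling through ixgbe_iosf_wait(): once before issuing the command, to make sure the indirect bus is idle, and once after, to wait for completion. A minimal sketch of that shared call shape under the same driver names (the wrapper my_iosf_xact() itself is illustrative, not part of the patch):

	static s32 my_iosf_xact(struct ixgbe_hw *hw, u32 command)
	{
		u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
		u32 ctrl;
		s32 ret;

		/* serialize against firmware and the other port */
		ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
		if (ret)
			return ret;

		ret = ixgbe_iosf_wait(hw, NULL);	/* bus must be idle first */
		if (ret)
			goto out;

		IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
		ret = ixgbe_iosf_wait(hw, &ctrl);	/* poll until BUSY clears */

	out:
		hw->mac.ops.release_swfw_sync(hw, gssr);
		return ret;
	}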
@@ -863,7 +1113,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) } status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status) return status; @@ -945,6 +1195,476 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) return status; } +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: true if SFP module is linear + */ +static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + *linear = false; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return 0; +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. + * @hw: pointer to hardware structure + * + * Configures the extern PHY and the integrated KR PHY for SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + s32 status; + u16 slice, value; + bool setup_linear = false; + + /* Check if SFP module is supported and linear */ + status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227 and SFP not present error is + * not accepted in the setup MAC link flow. + */ + if (status == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (status) + return status; + + /* Configure CS4227 LINE side to 10G SR. */ + slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); + value = IXGBE_CS4227_SPEED_10G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, + value); + + /* Configure CS4227 for HOST connection rate then type. */ + slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); + value = speed & IXGBE_LINK_SPEED_10GB_FULL ? + IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, + value); + + slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, + value); + + /* If internal link mode is XFI, then setup XFI internal link. */ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) + status = ixgbe_setup_ixfi_x550em(hw, &speed); + + return status; +} + +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. 
+ * + * Returns error status for any failure + **/ +static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait) +{ + s32 status; + ixgbe_link_speed force_speed; + + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If internal link mode is XFI, then setup XFI internal link. */ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status) + return status; + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. + **/ +static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 status; + u16 autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); + + /* If check link fails or MAC link is not up, then return */ + if (status || !(*link_up)) + return status; + + /* MAC link is up, so check external PHY link. + * Read this twice back to back to indicate current status. + */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; + + return 0; +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + switch (mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; + break; + case ixgbe_media_type_copper: + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; + break; + default: + break; + } +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + if (status) + return status; + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + return 0; +} + +/** ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, 
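/*
 * A note on the double read in ixgbe_check_link_t_X550em() above: MDIO
 * status bits of this kind are typically latched low, so the first read
 * returns (and clears) a stale latched link-down event and only the
 * second read reflects current state. A hedged sketch of the pattern,
 * mirroring ixgbe_ext_phy_t_x550em_get_link() further down (the wrapper
 * name is illustrative):
 *
 *	static s32 my_get_ext_link(struct ixgbe_hw *hw, bool *link_up)
 *	{
 *		u16 val;
 *		s32 err;
 *
 *		err = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
 *					   IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &val);
 *		if (err)
 *			return err;
 *		err = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
 *					   IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &val);
 *		if (err)
 *			return err;
 *		*link_up = !!(val & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
 *		return 0;
 *	}
 */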
+					       ixgbe_link_speed *speed,
+					       bool *autoneg)
+{
+	/* SFP */
+	if (hw->phy.media_type == ixgbe_media_type_fiber) {
+		/* CS4227 SFP must not enable auto-negotiation */
+		*autoneg = false;
+
+		if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+			*speed = IXGBE_LINK_SPEED_1GB_FULL;
+			return 0;
+		}
+
+		/* Link capabilities are based on SFP */
+		if (hw->phy.multispeed_fiber)
+			*speed = IXGBE_LINK_SPEED_10GB_FULL |
+				 IXGBE_LINK_SPEED_1GB_FULL;
+		else
+			*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	} else {
+		*speed = IXGBE_LINK_SPEED_10GB_FULL |
+			 IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+	}
+	return 0;
+}
+
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ *	 PHY interrupt is lsc
+ *
+ * Determine if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+{
+	u32 status;
+	u16 reg;
+
+	*lsc = false;
+
+	/* Vendor alarm triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+		return status;
+
+	/* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+			       IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+		return status;
+
+	/* High temperature failure alarm triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status)
+		return status;
+
+	/* If high temperature failure, then return over temp error and exit */
+	if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+		/* power down the PHY in case the PHY FW didn't already */
+		ixgbe_set_copper_phy_power(hw, false);
+		return IXGBE_ERR_OVERTEMP;
+	}
+
+	/* Vendor alarm 2 triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+	if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+		return status;
+
+	/* link connect/disconnect event occurred */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+	if (status)
+		return status;
+
+	/* Indicate LSC */
+	if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+		*lsc = true;
+
+	return 0;
+}
+
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
+ *
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
+ **/
+static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+	u32 status;
+	u16 reg;
+	bool lsc;
+
+	/* Clear interrupt flags */
+	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+	/* Enable link status change alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+	if (status)
+		return status;
+
+	reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+				       IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+	if (status)
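/*
 * How the LASI plumbing above is meant to be consumed: the base driver
 * invokes phy.ops.handle_lasi (installed in ixgbe_init_phy_ops_X550em
 * below), presumably from its interrupt/service path, and treats
 * IXGBE_ERR_OVERTEMP as fatal. A hedged sketch, with
 * my_take_port_down()/my_retry_later() as illustrative placeholders:
 *
 *	s32 err = hw->phy.ops.handle_lasi(hw);
 *	if (err == IXGBE_ERR_OVERTEMP)
 *		my_take_port_down(hw);	// over-temperature: stop the port
 *	else if (err)
 *		my_retry_later(hw);	// PHY access failed; try next cycle
 */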
+		return status;
+
+	/* Enables high temperature failure alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+	if (status)
+		return status;
+
+	/* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+		IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+	if (status)
+		return status;
+
+	/* Enable chip-wide vendor alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	return status;
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If high temperature
+ * failure alarm then return error, else if link status change
+ * then setup internal/external PHY link
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+	struct ixgbe_phy_info *phy = &hw->phy;
+	bool lsc;
+	u32 status;
+
+	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+	if (status)
+		return status;
+
+	if (lsc && phy->ops.setup_internal_link)
+		return phy->ops.setup_internal_link(hw);
+
+	return 0;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+				       ixgbe_link_speed speed)
+{
+	s32 status;
+	u32 reg_val;
+
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
+		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
+	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+	/* Advertise 10G support. */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+	/* Advertise 1G support. */
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+	/* Restart auto-negotiation. */
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+	return status;
+}
+
 /** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
* @hw: pointer to hardware structure * @@ -990,85 +1710,82 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { - s32 status; - u32 reg_val; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; +/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. + **/ +static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +{ + u32 ret; + u16 autoneg_status; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + *link_up = false; - /* Advertise 10G support. */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret) + return ret; - /* Advertise 1G support. */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret) + return ret; - /* Restart auto-negotiation. */ - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); - return status; + return 0; } -/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY +/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link * @hw: point to hardware structure * - * Configures the integrated KR PHY to talk to the external PHY. The base - * driver will call this function when it gets notification via interrupt from - * the external PHY. This function forces the internal PHY into iXFI mode at - * the correct speed. + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. * - * A return of a non-zero value indicates an error, and the base driver should - * not report link up. + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. 
**/ -static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) +static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) { - u32 status; - u16 lasi, autoneg_status, speed; ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; - /* Verify that the external link status has changed */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi); + /* If link is not up, then there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; - /* If there was no change in link status, we can just exit */ - if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM)) + if (!link_up) return 0; - /* we read this twice back to back to indicate current status */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); + &speed); if (status) return status; - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); + /* If link is not still up, then no setup is necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; - /* If link is not up return an error indicating treat link as down */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) - return IXGBE_ERR_INVALID_LINK_SETTINGS; - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &speed); + if (!link_up) + return 0; /* clear everything but the speed and duplex bits */ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; @@ -1088,6 +1805,160 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) return ixgbe_setup_ixfi_x550em(hw, &force_speed); } +/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_reset_phy_generic(hw); + + if (status) + return status; + + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + +/** ixgbe_get_lcd_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner. 
+ **/ +static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); + if (status) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** ixgbe_enter_lplu_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting + * the X557 PHY immediately prior to entering LPLU. + **/ +static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* SW LPLU not required on later HW revisions. */ + if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))) + return 0; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return status; + + status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, + &hw->eeprom.ctrl_word_3); + if (status) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or + * manageability disabled, then force link down by entering + * low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, false); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + if (status) + return status; + + /* If no valid LCD link speed, then force link down and exit. */ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, false); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return ixgbe_set_copper_phy_power(hw, false); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. 
*/ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + if (status) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Setup link at least common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, false); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * @@ -1098,25 +1969,32 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; + ixgbe_link_speed speed; s32 ret_val; - u32 esdp; - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + hw->mac.ops.set_lan_id(hw); - if (hw->bus.lan_id) { - esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); - esdp |= IXGBE_ESDP_SDP1_DIR; + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal PHY mode is KR, then initialize KR link */ + if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); } - esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); - /* Setup function pointers based on detected SFP module and speeds */ + /* Setup function pointers based on detected hardware */ ixgbe_init_mac_link_ops_X550em(hw); if (phy->sfp_type != ixgbe_sfp_type_unknown) phy->ops.reset = NULL; @@ -1134,11 +2012,35 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_ext_t: - phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em; + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. 
+		 */
+		if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+			phy->ops.setup_internal_link =
+					ixgbe_setup_internal_phy_t_x550em;
+		} else {
+			speed = IXGBE_LINK_SPEED_10GB_FULL |
+				IXGBE_LINK_SPEED_1GB_FULL;
+			ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+		}
+
+		/* setup SW LPLU only for first revision */
+		if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+							IXGBE_FUSES0_GROUP(0))))
+			phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
+		phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+		phy->ops.reset = ixgbe_reset_phy_t_X550em;
 		break;
 	default:
 		break;
 	}
+
 	return ret_val;
 }
 
@@ -1177,67 +2079,37 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
 {
-	u32 status;
+	s32 status;
 	u16 reg;
-	u32 retries = 2;
-
-	do {
-		/* decrement retries counter and exit if we hit 0 */
-		if (retries < 1) {
-			hw_dbg(hw, "External PHY not yet finished resetting.");
-			return IXGBE_ERR_PHY;
-		}
-		retries--;
-
-		status = hw->phy.ops.read_reg(hw,
-					      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
-					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					      &reg);
-		if (status)
-			return status;
-
-		/* Verify PHY FW reset has completed */
-	} while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
-
-	/* Set port to low power mode */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-	if (status)
-		return status;
-
-	/* Enable the transmitter */
 	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+				      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
 				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
 				      &reg);
 	if (status)
 		return status;
 
-	reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+	/* If PHY FW reset completed bit is set then this is the first
+	 * SW instance after a power on so the PHY FW must be un-stalled.
+	 */
+	if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+		status = hw->phy.ops.read_reg(hw,
+					IXGBE_MDIO_GLOBAL_RES_PR_10,
+					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					&reg);
+		if (status)
+			return status;
 
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
-				       IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				       reg);
-	if (status)
-		return status;
+		reg &= ~IXGBE_MDIO_POWER_UP_STALL;
 
-	/* Un-stall the PHY FW */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_GLOBAL_RES_PR_10,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-	if (status)
-		return status;
-
-	reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+		status = hw->phy.ops.write_reg(hw,
+					IXGBE_MDIO_GLOBAL_RES_PR_10,
+					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					reg);
+		if (status)
+			return status;
+	}
 
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_GLOBAL_RES_PR_10,
-				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				       reg);
 	return status;
 }
 
@@ -1254,6 +2126,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
 	s32 status;
 	u32 ctrl = 0;
 	u32 i;
+	u32 hlreg0;
 	bool link_up = false;
 
 	/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -1338,6 +2211,15 @@ mac_reset_top:
 	hw->mac.num_rar_entries = 128;
 	hw->mac.ops.init_rx_addrs(hw);
 
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+		hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+	}
+
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+		ixgbe_setup_mux_ctl(hw);
+
 	return status;
 }
 
@@ -1390,6 +2272,62 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
 	IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
 }
 
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+	u32 esdp;
+
+	if (!hw->bus.lan_id)
+		return;
+	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	if (state)
+		esdp |= IXGBE_ESDP_SDP1;
+	else
+		esdp &= ~IXGBE_ESDP_SDP1;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and sets the I2C MUX
+ */
+static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+	s32 status;
+
+	status = ixgbe_acquire_swfw_sync_X540(hw, mask);
+	if (status)
+		return status;
+
+	if (mask & IXGBE_GSSR_I2C_MASK)
+		ixgbe_set_mux(hw, 1);
+
+	return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and sets the I2C MUX
+ */
+static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+	if (mask & IXGBE_GSSR_I2C_MASK)
+		ixgbe_set_mux(hw, 0);
+
+	ixgbe_release_swfw_sync_X540(hw, mask);
+}
+
 #define X550_COMMON_MAC \
 	.init_hw			= &ixgbe_init_hw_generic, \
 	.start_hw			= &ixgbe_start_hw_X540, \
@@ -1398,7 +2336,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
 	.get_mac_addr			= &ixgbe_get_mac_addr_generic, \
 	.get_device_caps		= &ixgbe_get_device_caps_generic, \
 	.stop_adapter			= &ixgbe_stop_adapter_generic, \
-	.get_bus_info			= &ixgbe_get_bus_info_generic, \
	.set_lan_id			= &ixgbe_set_lan_id_multi_port_pcie, \
 	.read_analog_reg8		= NULL, \
 	.write_analog_reg8		= NULL, \
@@ -1428,8 +2365,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
&ixgbe_set_source_address_pruning_X550, \ .set_ethertype_anti_spoofing = \ &ixgbe_set_ethertype_anti_spoofing_X550, \ - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ @@ -1447,7 +2382,10 @@ static struct ixgbe_mac_operations mac_ops_X550 = { .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, .setup_sfp = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, }; static struct ixgbe_mac_operations mac_ops_X550EM_x = { @@ -1458,8 +2396,10 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = { .get_wwn_prefix = NULL, .setup_link = NULL, /* defined later */ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, - + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, + .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, }; #define X550_COMMON_EEP \ @@ -1490,6 +2430,10 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .read_reg = &ixgbe_read_phy_reg_generic, \ + .write_reg = &ixgbe_write_phy_reg_generic, \ + .setup_link = &ixgbe_setup_phy_link_generic, \ + .set_phy_power = NULL, \ .check_overtemp = &ixgbe_tn_check_overtemp, \ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, @@ -1497,20 +2441,25 @@ static struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .init = NULL, .identify = &ixgbe_identify_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .read_i2c_combined = &ixgbe_read_i2c_combined_generic, - .write_i2c_combined = &ixgbe_write_i2c_combined_generic, }; static struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, - .read_reg = NULL, /* defined later */ - .write_reg = NULL, /* defined later */ - .setup_link = NULL, /* defined later */ + .read_i2c_combined = &ixgbe_read_i2c_combined_generic, + .write_i2c_combined = &ixgbe_write_i2c_combined_generic, + .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, + .write_i2c_combined_unlocked = + &ixgbe_write_i2c_combined_generic_unlocked, +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550EM_x) }; struct ixgbe_info ixgbe_X550_info = { @@ -1520,13 +2469,15 @@ struct ixgbe_info ixgbe_X550_info = { .eeprom_ops = &eeprom_ops_X550, .phy_ops = &phy_ops_X550, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550, }; struct ixgbe_info ixgbe_X550EM_x_info = { .mac = ixgbe_mac_X550EM_x, - .get_invariants = &ixgbe_get_invariants_X540, + .get_invariants = &ixgbe_get_invariants_X550_x, .mac_ops = &mac_ops_X550EM_x, .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_X550EM_x, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, }; diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h 
b/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h index 770e21a64..58434584b 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 #define IXGBE_RXDADV_SPH 0x8000 +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ IXGBE_RXD_ERR_LE | \ diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c index b2f5b161d..d3e5f5b37 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - /* We support this operation only for 82599 and x540 at the moment */ - if (adapter->hw.mac.type < ixgbe_mac_X550_vf) - return IXGBEVF_82599_RETA_SIZE; + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) + return IXGBEVF_X550_VFRETA_SIZE; - return 0; + return IXGBEVF_82599_RETA_SIZE; } static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) { - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - /* We support this operation only for 82599 and x540 at the moment */ - if (adapter->hw.mac.type < ixgbe_mac_X550_vf) - return IXGBEVF_RSS_HASH_KEY_SIZE; - - return 0; + return IXGBEVF_RSS_HASH_KEY_SIZE; } static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, @@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - /* If neither indirection table nor hash key was requested - just - * return a success avoiding taking any locks. - */ - if (!indir && !key) - return 0; + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { + if (key) + memcpy(key, adapter->rss_key, sizeof(adapter->rss_key)); - spin_lock_bh(&adapter->mbx_lock); - if (indir) - err = ixgbevf_get_reta_locked(&adapter->hw, indir, - adapter->num_rx_queues); + if (indir) { + int i; - if (!err && key) - err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + } + } else { + /* If neither indirection table nor hash key was requested + * - just return a success avoiding taking any locks. 
+ */ + if (!indir && !key) + return 0; - spin_unlock_bh(&adapter->mbx_lock); + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + } return err; } diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 775d08900..ec3147279 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -144,9 +144,11 @@ struct ixgbevf_ring { #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES -#define IXGBEVF_MAX_RSS_QUEUES 2 -#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */ +#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */ #define IXGBEVF_RSS_HASH_KEY_SIZE 40 +#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */ #define IXGBEVF_DEFAULT_TXD 1024 #define IXGBEVF_DEFAULT_RXD 512 @@ -447,6 +449,9 @@ struct ixgbevf_adapter { spinlock_t mbx_lock; unsigned long last_reset; + + u32 rss_key[IXGBEVF_VFRSSRK_REGS]; + u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; }; enum ixbgevf_state_t { @@ -466,6 +471,12 @@ enum ixgbevf_boards { board_X550EM_x_vf, }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern const struct ixgbevf_info ixgbevf_X550_vf_info; diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1d7b00b03..592ff237d 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + +static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
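/*
 * Worked example for the hash-type selection here: an IPv4/UDP frame
 * reports rss_type == IXGBE_RXDADV_RSSTYPE_IPV4_UDP (0x7); bit 7 is set
 * in IXGBE_RSS_L4_TYPES_MASK, so the skb hash is marked
 * PKT_HASH_TYPE_L4. A plain IPv4 frame (rss_type 0x2) is not in the
 * mask and is marked PKT_HASH_TYPE_L3 instead.
 */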
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containig ring specific data @@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { + ixgbevf_rx_hash(rx_ring, rx_desc, skb); ixgbevf_rx_checksum(rx_ring, rx_desc, skb); if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -649,46 +676,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } /** - * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an ixgbevf specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - **/ -static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ixgbevf_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IXGBEVF_RX_BUFSZ; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif + unsigned int pull_len; - if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; + if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as is */ @@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); /* avoid re-using remote pages */ if (unlikely(ixgbevf_page_is_reserved(page))) @@ -1009,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; ixgbevf_for_each_ring(ring, q_vector->tx) @@ -1028,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - ixgbevf_for_each_ring(ring, q_vector->rx) - clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, - per_ring_budget) - < per_ring_budget); + ixgbevf_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget); + work_done += cleaned; + clean_complete &= (cleaned < per_ring_budget); + } #ifdef CONFIG_NET_RX_BUSY_POLL ixgbevf_qv_unlock_napi(q_vector); @@ -1041,7 +1042,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->rx_itr_setting & 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && @@ -1697,22 +1698,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vfmrqc = 0, vfreta = 0; - u32 rss_key[10]; u16 rss_i = adapter->num_rx_queues; - int i, j; + u8 i, j; /* Fill out hash function seeds */ - netdev_rss_key_fill(rss_key, sizeof(rss_key)); - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); - /* Fill out redirection table */ - for (i = 0, j = 0; i < 64; i++, j++) { + for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | (j * 0x1); - if ((i & 3) == 3) + + adapter->rss_indir_tbl[i] = j; + + vfreta |= j << (i & 0x3) * 8; + if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); + vfreta = 0; + } } /* Perform hash on these packet types */ @@ -1890,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + unsigned int flags = netdev->flags; + int xcast_mode; + + xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : + (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 
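/*
 * Worked example for the mapping here: a typical VF interface has
 * IFF_BROADCAST | IFF_MULTICAST set and IFF_ALLMULTI clear, which
 * selects IXGBEVF_XCAST_MODE_MULTI; running `ip link set dev <vf>
 * allmulticast on` sets IFF_ALLMULTI and flips the request to
 * IXGBEVF_XCAST_MODE_ALLMULTI. The chosen mode is then forwarded to
 * the PF through the new IXGBE_VF_UPDATE_XCAST_MODE mailbox message
 * (see ixgbevf_update_xcast_mode() below).
 */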
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; spin_lock_bh(&adapter->mbx_lock); + hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode); + /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); @@ -3894,6 +3906,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif + .ndo_features_check = passthru_features_check, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h b/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h index 82f44e06e..340cdd469 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c index d1339b050..427f3605c 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -469,6 +469,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** + * ixgbevf_update_xcast_mode - Update Multicast mode + * @hw: pointer to the HW structure + * @netdev: pointer to net device structure + * @xcast_mode: new multicast mode + * + * Updates the Multicast Mode of VF. + **/ +static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, + struct net_device *netdev, int xcast_mode) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = mbx->ops.write_posted(hw, msgbuf, 2); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2); + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + return 0; +} + +/** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID @@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .check_link = ixgbevf_check_mac_link_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_update_xcast_mode, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, }; diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h index d40f036b6..ef9f7736b 100644 --- a/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -63,6 +63,7 @@ struct ixgbe_mac_operations { s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); |
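The new ixgbevf_update_xcast_mode() above follows the driver's usual posted-mailbox request/reply pattern: write the request, read the posted answer, strip the CTS bit, and treat a NACK as a permission error. A minimal sketch of the same pattern for a hypothetical VF message (MY_VF_REQUEST is illustrative; the mailbox ops and IXGBE_VT_MSGTYPE_* flags are the ones used above):

	static s32 my_vf_simple_request(struct ixgbe_hw *hw, u32 arg)
	{
		struct ixgbe_mbx_info *mbx = &hw->mbx;
		u32 msgbuf[2];
		s32 err;

		msgbuf[0] = MY_VF_REQUEST;	/* hypothetical message ID */
		msgbuf[1] = arg;

		err = mbx->ops.write_posted(hw, msgbuf, 2);	/* send to PF */
		if (err)
			return err;

		err = mbx->ops.read_posted(hw, msgbuf, 2);	/* PF's reply */
		if (err)
			return err;

		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
		if (msgbuf[0] == (MY_VF_REQUEST | IXGBE_VT_MSGTYPE_NACK))
			return -EPERM;			/* PF refused */

		return 0;
	}

As in ixgbevf_update_xcast_mode(), a real caller would also gate the request on hw->api_version, since the PF only understands the message on mailbox API 1.2 and later.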