From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as the OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior
Date:   Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior

We lose all the git history this way, which is not good. We should set up
a separate OPNFV project repo for this in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
 kernel/drivers/net/ethernet/intel/i40e/Makefile     |    47 +
 kernel/drivers/net/ethernet/intel/i40e/i40e.h       |   759 ++
 .../drivers/net/ethernet/intel/i40e/i40e_adminq.c   |  1034 ++
 .../drivers/net/ethernet/intel/i40e/i40e_adminq.h   |   157 +
 .../net/ethernet/intel/i40e/i40e_adminq_cmd.h       |  2337 +++++
 .../drivers/net/ethernet/intel/i40e/i40e_alloc.h    |    58 +
 .../drivers/net/ethernet/intel/i40e/i40e_common.c   |  3644 +++++
 kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c   |   720 ++
 kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h   |   112 +
 .../drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c   |   321 +
 .../drivers/net/ethernet/intel/i40e/i40e_debugfs.c  |  2258 ++++
 kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c  |   154 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_diag.h  |    51 +
 .../drivers/net/ethernet/intel/i40e/i40e_ethtool.c  |  2587 +++++
 kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c  |  1565 +++
 kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h  |   127 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c   |   360 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h   |   236 +
 .../drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c  |  1143 ++
 .../drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h  |   181 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_main.c  | 10368 +++++++++++++++++++
 kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c   |  1006 ++
 .../drivers/net/ethernet/intel/i40e/i40e_osdep.h    |    84 +
 .../net/ethernet/intel/i40e/i40e_prototype.h        |   311 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c   |   728 ++
 .../net/ethernet/intel/i40e/i40e_register.h         |  3369 ++
 .../drivers/net/ethernet/intel/i40e/i40e_status.h   |   100 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c  |  2778 +++
 kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h  |   313 +
 kernel/drivers/net/ethernet/intel/i40e/i40e_type.h  |  1421 +++
 .../net/ethernet/intel/i40e/i40e_virtchnl.h         |   362 +
 .../net/ethernet/intel/i40e/i40e_virtchnl_pf.c      |  2402 +++++
 .../net/ethernet/intel/i40e/i40e_virtchnl_pf.h      |   130 +
 33 files changed, 41223 insertions(+)
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/Makefile
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_alloc.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_common.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_diag.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_main.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_register.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_status.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_type.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
 create mode 100644 kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h

diff --git a/kernel/drivers/net/ethernet/intel/i40e/Makefile b/kernel/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 000000000..b4729ba57
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,47 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 - 2015 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see .
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List
+# Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver +# + +obj-$(CONFIG_I40E) += i40e.o + +i40e-objs := i40e_main.o \ + i40e_ethtool.o \ + i40e_adminq.o \ + i40e_common.o \ + i40e_hmc.o \ + i40e_lan_hmc.o \ + i40e_nvm.o \ + i40e_debugfs.o \ + i40e_diag.o \ + i40e_txrx.o \ + i40e_ptp.o \ + i40e_virtchnl_pf.o + +i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o +i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e.h b/kernel/drivers/net/ethernet/intel/i40e/i40e.h new file mode 100644 index 000000000..5d4730712 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e.h @@ -0,0 +1,759 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_H_ +#define _I40E_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "i40e_type.h" +#include "i40e_prototype.h" +#ifdef I40E_FCOE +#include "i40e_fcoe.h" +#endif +#include "i40e_virtchnl.h" +#include "i40e_virtchnl_pf.h" +#include "i40e_txrx.h" +#include "i40e_dcb.h" + +/* Useful i40e defaults */ +#define I40E_BASE_PF_SEID 16 +#define I40E_BASE_VSI_SEID 512 +#define I40E_BASE_VEB_SEID 288 +#define I40E_MAX_VEB 16 + +#define I40E_MAX_NUM_DESCRIPTORS 4096 +#define I40E_MAX_REGISTER 0x800000 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) +#define I40E_DEFAULT_NUM_DESCRIPTORS 512 +#define I40E_REQ_DESCRIPTOR_MULTIPLE 32 +#define I40E_MIN_NUM_DESCRIPTORS 64 +#define I40E_MIN_MSIX 2 +#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ +#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ +#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ +#define I40E_DEFAULT_QUEUES_PER_VF 4 +#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ +#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ +#define I40E_FDIR_RING 0 +#define I40E_FDIR_RING_COUNT 32 +#ifdef I40E_FCOE +#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ +#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ +#endif /* I40E_FCOE */ +#define I40E_MAX_AQ_BUF_SIZE 4096 +#define I40E_AQ_LEN 256 +#define I40E_AQ_WORK_LIMIT 32 +#define I40E_MAX_USER_PRIORITY 8 
+#define I40E_DEFAULT_MSG_ENABLE 4 +#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 +#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) + +/* Ethtool Private Flags */ +#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) + +#define I40E_NVM_VERSION_LO_SHIFT 0 +#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) +#define I40E_NVM_VERSION_HI_SHIFT 12 +#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) + +/* The values in here are decimal coded as hex as is the case in the NVM map*/ +#define I40E_CURRENT_NVM_VERSION_HI 0x2 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 + +/* magic for getting defines into strings */ +#define STRINGIFY(foo) #foo +#define XSTRINGIFY(bar) STRINGIFY(bar) + +#define I40E_RX_DESC(R, i) \ + ((ring_is_16byte_desc_enabled(R)) \ + ? (union i40e_32byte_rx_desc *) \ + (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \ + : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))) +#define I40E_TX_DESC(R, i) \ + (&(((struct i40e_tx_desc *)((R)->desc))[i])) +#define I40E_TX_CTXTDESC(R, i) \ + (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) +#define I40E_TX_FDIRDESC(R, i) \ + (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) + +/* default to trying for four seconds */ +#define I40E_TRY_LINK_TIMEOUT (4 * HZ) + +/* driver state flags */ +enum i40e_state_t { + __I40E_TESTING, + __I40E_CONFIG_BUSY, + __I40E_CONFIG_DONE, + __I40E_DOWN, + __I40E_NEEDS_RESTART, + __I40E_SERVICE_SCHED, + __I40E_ADMINQ_EVENT_PENDING, + __I40E_MDD_EVENT_PENDING, + __I40E_VFLR_EVENT_PENDING, + __I40E_RESET_RECOVERY_PENDING, + __I40E_RESET_INTR_RECEIVED, + __I40E_REINIT_REQUESTED, + __I40E_PF_RESET_REQUESTED, + __I40E_CORE_RESET_REQUESTED, + __I40E_GLOBAL_RESET_REQUESTED, + __I40E_EMP_RESET_REQUESTED, + __I40E_EMP_RESET_INTR_RECEIVED, + __I40E_FILTER_OVERFLOW_PROMISC, + __I40E_SUSPENDED, + __I40E_PTP_TX_IN_PROGRESS, + __I40E_BAD_EEPROM, + __I40E_DOWN_REQUESTED, + __I40E_FD_FLUSH_REQUESTED, + __I40E_RESET_FAILED, + __I40E_PORT_TX_SUSPENDED, + __I40E_VF_DISABLE, +}; + +enum i40e_interrupt_policy { + I40E_INTERRUPT_BEST_CASE, + I40E_INTERRUPT_MEDIUM, + I40E_INTERRUPT_LOWEST +}; + +struct i40e_lump_tracking { + u16 num_entries; + u16 search_hint; + u16 list[0]; +#define I40E_PILE_VALID_BIT 0x8000 +}; + +#define I40E_DEFAULT_ATR_SAMPLE_RATE 20 +#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 +#define I40E_FDIR_BUFFER_FULL_MARGIN 10 +#define I40E_FDIR_BUFFER_HEAD_ROOM 32 +#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) + +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) + +enum i40e_fd_stat_idx { + I40E_FD_STAT_ATR, + I40E_FD_STAT_SB, + I40E_FD_STAT_PF_COUNT +}; +#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT) +#define I40E_FD_ATR_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR) +#define I40E_FD_SB_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB) + +struct i40e_fdir_filter { + struct hlist_node fdir_node; + /* filter ipnut set */ + u8 flow_type; + u8 ip4_proto; + /* TX packet view of src and dst */ + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be32 sctp_v_tag; + /* filter control */ + u16 q_index; + u8 flex_off; + u8 pctype; + u16 dest_vsi; + u8 dest_ctl; + u8 fd_status; + u16 cnt_index; + u32 fd_id; +}; + +#define I40E_ETH_P_LLDP 0x88cc + +#define I40E_DCB_PRIO_TYPE_STRICT 0 +#define I40E_DCB_PRIO_TYPE_ETS 1 +#define I40E_DCB_STRICT_PRIO_CREDITS 127 +#define I40E_MAX_USER_PRIORITY 8 +/* DCB per TC information data structure */ +struct i40e_tc_info { + u16 qoffset; 
/* Queue offset from base queue */ + u16 qcount; /* Total Queues */ + u8 netdev_tc; /* Netdev TC index if netdev associated */ +}; + +/* TC configuration data structure */ +struct i40e_tc_configuration { + u8 numtc; /* Total number of enabled TCs */ + u8 enabled_tc; /* TC map */ + struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* struct that defines the Ethernet device */ +struct i40e_pf { + struct pci_dev *pdev; + struct i40e_hw hw; + unsigned long state; + unsigned long link_check_timeout; + struct msix_entry *msix_entries; + bool fc_autoneg_status; + + u16 eeprom_version; + u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ + u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ + u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ + u16 num_req_vfs; /* num VFs requested for this VF */ + u16 num_vf_qps; /* num queue pairs per VF */ +#ifdef I40E_FCOE + u16 num_fcoe_qps; /* num fcoe queues this PF has set up */ + u16 num_fcoe_msix; /* num queue vectors per fcoe pool */ +#endif /* I40E_FCOE */ + u16 num_lan_qps; /* num lan queues this PF has set up */ + u16 num_lan_msix; /* num queue vectors for the base PF vsi */ + int queues_left; /* queues left unclaimed */ + u16 rss_size; /* num queues in the RSS array */ + u16 rss_size_max; /* HW defined max RSS queues */ + u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ + u16 num_alloc_vsi; /* num VSIs this driver supports */ + u8 atr_sample_rate; + bool wol_en; + + struct hlist_head fdir_filter_list; + u16 fdir_pf_active_filters; + u16 fd_sb_cnt_idx; + u16 fd_atr_cnt_idx; + unsigned long fd_flush_timestamp; + u32 fd_flush_cnt; + u32 fd_add_err; + u32 fd_atr_cnt; + u32 fd_tcp_rule; + +#ifdef CONFIG_I40E_VXLAN + __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + u16 pending_vxlan_bitmap; + +#endif + enum i40e_interrupt_policy int_policy; + u16 rx_itr_default; + u16 tx_itr_default; + u32 msg_enable; + char int_name[I40E_INT_NAME_STR_LEN]; + u16 adminq_work_limit; /* num of admin receive queue desc to process */ + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list service_timer; + struct work_struct service_task; + + u64 flags; +#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1) +#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2) +#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3) +#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) +#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) +#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) +#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) +#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) +#ifdef I40E_FCOE +#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11) +#endif /* I40E_FCOE */ +#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) +#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) +#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) +#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) +#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) +#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) +#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) +#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) +#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) +#define I40E_FLAG_PTP (u64)(1 << 25) +#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) +#ifdef CONFIG_I40E_VXLAN +#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) +#endif +#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) +#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) + + /* tracks features that get auto 
disabled by errors */ + u64 auto_disable_flags; + +#ifdef I40E_FCOE + struct i40e_fcoe fcoe; + +#endif /* I40E_FCOE */ + bool stat_offsets_loaded; + struct i40e_hw_port_stats stats; + struct i40e_hw_port_stats stats_offsets; + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + u32 tx_sluggish_count; + u32 hw_csum_rx_error; + u32 led_status; + u16 corer_count; /* Core reset count */ + u16 globr_count; /* Global reset count */ + u16 empr_count; /* EMP reset count */ + u16 pfr_count; /* PF reset count */ + u16 sw_int_count; /* SW interrupt count */ + + struct mutex switch_mutex; + u16 lan_vsi; /* our default LAN VSI */ + u16 lan_veb; /* initial relay, if exists */ +#define I40E_NO_VEB 0xffff +#define I40E_NO_VSI 0xffff + u16 next_vsi; /* Next unallocated VSI - 0-based! */ + struct i40e_vsi **vsi; + struct i40e_veb *veb[I40E_MAX_VEB]; + + struct i40e_lump_tracking *qp_pile; + struct i40e_lump_tracking *irq_pile; + + /* switch config info */ + u16 pf_seid; + u16 main_vsi_seid; + u16 mac_seid; + struct kobject *switch_kobj; +#ifdef CONFIG_DEBUG_FS + struct dentry *i40e_dbg_pf; +#endif /* CONFIG_DEBUG_FS */ + + u16 instance; /* A unique number per i40e_pf instance in the system */ + + /* sr-iov config info */ + struct i40e_vf *vf; + int num_alloc_vfs; /* actual number of VFs allocated */ + u32 vf_aq_requests; + + /* DCBx/DCBNL capability for PF that indicates + * whether DCBx is managed by firmware or host + * based agent (LLDPAD). Also, indicates what + * flavor of DCBx protocol (IEEE/CEE) is supported + * by the device. For now we're supporting IEEE + * mode only. + */ + u16 dcbx_cap; + + u32 fcoe_hmc_filt_num; + u32 fcoe_hmc_cntx_num; + struct i40e_filter_control_settings filter_settings; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; /* Used to protect the device time registers. 
*/ + u64 ptp_base_adj; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + bool ptp_tx; + bool ptp_rx; + u16 rss_table_size; + /* These are only valid in NPAR modes */ + u32 npar_max_bw; + u32 npar_min_bw; +}; + +struct i40e_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; +#define I40E_VLAN_ANY -1 + s16 vlan; + u8 counter; /* number of instances of this filter */ + bool is_vf; /* filter belongs to a VF */ + bool is_netdev; /* filter belongs to a netdev */ + bool changed; /* filter needs to be sync'd to the HW */ + bool is_laa; /* filter is a Locally Administered Address */ +}; + +struct i40e_veb { + struct i40e_pf *pf; + u16 idx; + u16 veb_idx; /* index of VEB parent */ + u16 seid; + u16 uplink_seid; + u16 stats_idx; /* index of VEB parent */ + u8 enabled_tc; + u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ + u16 flags; + u16 bw_limit; + u8 bw_max_quanta; + bool is_abs_credits; + u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS]; + u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS]; + u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS]; + struct kobject *kobj; + bool stat_offsets_loaded; + struct i40e_eth_stats stats; + struct i40e_eth_stats stats_offsets; +}; + +/* struct that defines a VSI, associated with a dev */ +struct i40e_vsi { + struct net_device *netdev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + bool netdev_registered; + bool stat_offsets_loaded; + + u32 current_netdev_flags; + unsigned long state; +#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0) +#define I40E_VSI_FLAG_VEB_OWNER (1<<1) + unsigned long flags; + + struct list_head mac_filter_list; + + /* VSI stats */ + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_offsets; + struct i40e_eth_stats eth_stats; + struct i40e_eth_stats eth_stats_offsets; +#ifdef I40E_FCOE + struct i40e_fcoe_stats fcoe_stats; + struct i40e_fcoe_stats fcoe_stats_offsets; + bool fcoe_stat_offsets_loaded; +#endif + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + + /* These are containers of ring pointers, allocated at run-time */ + struct i40e_ring **rx_rings; + struct i40e_ring **tx_rings; + + u16 work_limit; + /* high bit set means dynamic, use accessor routines to read/write. + * hardware only supports 2us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. 
+ */ + u16 rx_itr_setting; + u16 tx_itr_setting; + + u16 rss_table_size; + u16 rss_size; + + u16 max_frame; + u16 rx_hdr_len; + u16 rx_buf_len; + u8 dtype; + + /* List of q_vectors allocated to this VSI */ + struct i40e_q_vector **q_vectors; + int num_q_vectors; + int base_vector; + bool irqs_ready; + + u16 seid; /* HW index of this VSI (absolute index) */ + u16 id; /* VSI number */ + u16 uplink_seid; + + u16 base_queue; /* vsi's first queue in hw array */ + u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ + u16 num_queue_pairs; /* Used tx and rx pairs */ + u16 num_desc; + enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ + u16 vf_id; /* Virtual function ID for SRIOV VSIs */ + + struct i40e_tc_configuration tc_config; + struct i40e_aqc_vsi_properties_data info; + + /* VSI BW limit (absolute across all TCs) */ + u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */ + + /* Relative TC credits across VSIs */ + u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS]; + /* TC BW limit credits within VSI */ + u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS]; + /* TC BW limit max quanta within VSI */ + u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS]; + + struct i40e_pf *back; /* Backreference to associated PF */ + u16 idx; /* index in pf->vsi[] */ + u16 veb_idx; /* index of VEB parent */ + struct kobject *kobj; /* sysfs object */ + + /* VSI specific handlers */ + irqreturn_t (*irq_handler)(int irq, void *data); + + /* current rxnfc data */ + struct ethtool_rxnfc rxnfc; /* current rss hash opts */ +} ____cacheline_internodealigned_in_smp; + +struct i40e_netdev_priv { + struct i40e_vsi *vsi; +}; + +/* struct that defines an interrupt vector */ +struct i40e_q_vector { + struct i40e_vsi *vsi; + + u16 v_idx; /* index in the vsi->q_vector array. 
*/ + u16 reg_idx; /* register index of the interrupt */ + + struct napi_struct napi; + + struct i40e_ring_container rx; + struct i40e_ring_container tx; + + u8 num_ringpairs; /* total number of ring pairs in vector */ + + cpumask_t affinity_mask; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[I40E_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +/* lan device */ +struct i40e_device { + struct list_head list; + struct i40e_pf *pf; +}; + +/** + * i40e_fw_version_str - format the FW and NVM version strings + * @hw: ptr to the hardware info + **/ +static inline char *i40e_fw_version_str(struct i40e_hw *hw) +{ + static char buf[32]; + + snprintf(buf, sizeof(buf), + "f%d.%d.%05d a%d.%d n%x.%02x e%x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> + I40E_NVM_VERSION_HI_SHIFT, + (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> + I40E_NVM_VERSION_LO_SHIFT, + (hw->nvm.eetrack & 0xffffff)); + + return buf; +} + +/** + * i40e_netdev_to_pf: Retrieve the PF struct for given netdev + * @netdev: the corresponding netdev + * + * Return the PF struct for the given netdev + **/ +static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + return vsi->back; +} + +static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi, + irqreturn_t (*irq_handler)(int, void *)) +{ + vsi->irq_handler = irq_handler; +} + +/** + * i40e_rx_is_programming_status - check for programming status descriptor + * @qw: the first quad word of the program status descriptor + * + * The value of in the descriptor length field indicate if this + * is a programming status descriptor for flow director or FCoE + * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise + * it is a packet descriptor. 
+ **/ +static inline bool i40e_rx_is_programming_status(u64 qw) +{ + return I40E_RX_PROG_STATUS_DESC_LENGTH == + (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT); +} + +/** + * i40e_get_fd_cnt_all - get the total FD filter space available + * @pf: pointer to the PF struct + **/ +static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) +{ + return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; +} + +/* needed by i40e_ethtool.c */ +int i40e_up(struct i40e_vsi *vsi); +void i40e_down(struct i40e_vsi *vsi); +extern const char i40e_driver_name[]; +extern const char i40e_driver_version_str[]; +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); +void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); +void i40e_update_stats(struct i40e_vsi *vsi); +void i40e_update_eth_stats(struct i40e_vsi *vsi); +struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); +int i40e_fetch_switch_configuration(struct i40e_pf *pf, + bool printconfig); + +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, + struct i40e_pf *pf, bool add); +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add); +void i40e_fdir_check_and_reenable(struct i40e_pf *pf); +u32 i40e_get_current_fd_count(struct i40e_pf *pf); +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_global_fd_count(struct i40e_pf *pf); +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); +void i40e_set_ethtool_ops(struct net_device *netdev); +struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev); +void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi); +struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, + u16 uplink, u32 param1); +int i40e_vsi_release(struct i40e_vsi *vsi); +struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type, + struct i40e_vsi *start_vsi); +#ifdef I40E_FCOE +void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc, bool is_add); +#endif +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc); +void i40e_veb_release(struct i40e_veb *veb); + +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); +i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); +void i40e_vsi_remove_pvid(struct i40e_vsi *vsi); +void i40e_vsi_reset_stats(struct i40e_vsi *vsi); +void i40e_pf_reset_stats(struct i40e_pf *pf); +#ifdef CONFIG_DEBUG_FS +void i40e_dbg_pf_init(struct i40e_pf *pf); +void i40e_dbg_pf_exit(struct i40e_pf *pf); +void i40e_dbg_init(void); +void i40e_dbg_exit(void); +#else +static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {} +static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {} +static inline void i40e_dbg_init(void) {} +static inline void i40e_dbg_exit(void) {} +#endif /* CONFIG_DEBUG_FS*/ +void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); +#ifdef I40E_FCOE +struct 
rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *storage); +int i40e_set_mac(struct net_device *netdev, void *p); +void i40e_set_rx_mode(struct net_device *netdev); +#endif +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +#ifdef I40E_FCOE +void i40e_tx_timeout(struct net_device *netdev); +int i40e_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid); +int i40e_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid); +#endif +int i40e_open(struct net_device *netdev); +int i40e_vsi_open(struct i40e_vsi *vsi); +void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); +int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); +struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); +bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); +struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); +#ifdef I40E_FCOE +int i40e_close(struct net_device *netdev); +int i40e_setup_tc(struct net_device *netdev, u8 tc); +void i40e_netpoll(struct net_device *netdev); +int i40e_fcoe_enable(struct net_device *netdev); +int i40e_fcoe_disable(struct net_device *netdev); +int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt); +u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf); +void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi); +void i40e_fcoe_vsi_setup(struct i40e_pf *pf); +int i40e_init_pf_fcoe(struct i40e_pf *pf); +int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi); +void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi); +int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb); +void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id); +#endif /* I40E_FCOE */ +void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); +#ifdef CONFIG_I40E_DCB +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg); +void i40e_dcbnl_set_all(struct i40e_vsi *vsi); +void i40e_dcbnl_setup(struct i40e_vsi *vsi); +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg); +#endif /* CONFIG_I40E_DCB */ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi); +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); +void i40e_ptp_set_increment(struct i40e_pf *pf); +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +void i40e_ptp_init(struct i40e_pf *pf); +void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); +#endif /* _I40E_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c new file mode 100644 index 000000000..3e0d20037 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -0,0 +1,1034 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * 
Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +static void i40e_resume_aq(struct i40e_hw *hw); + +/** + * i40e_is_nvm_update_op - return true if this is an NVM update operation + * @desc: API request descriptor + **/ +static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) +{ + return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) || + (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update)); +} + +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +static void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (i40e_is_vf(hw)) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.asq.bal = I40E_VF_ATQBAL1; + hw->aq.asq.bah = I40E_VF_ATQBAH1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + hw->aq.arq.bal = I40E_VF_ARQBAL1; + hw->aq.arq.bah = I40E_VF_ARQBAH1; + } else { + hw->aq.asq.tail = I40E_PF_ATQT; + hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; + hw->aq.asq.bal = I40E_PF_ATQBAL; + hw->aq.asq.bah = I40E_PF_ATQBAH; + hw->aq.arq.tail = I40E_PF_ARQT; + hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; + hw->aq.arq.bal = I40E_PF_ARQBAL; + hw->aq.arq.bah = I40E_PF_ARQBAH; + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + 
I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + +/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = cpu_to_le16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... 
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); + wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.asq.bal); + if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.arq.head, 0); + 
wr32(hw, hw->aq.arq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); + wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.arq.bal); + if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + hw->aq.asq.count = hw->aq.num_asq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_asq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + hw->aq.arq.count = hw->aq.num_arq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_arq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! 
*/ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + wr32(hw, hw->aq.asq.bal, 0); + wr32(hw, hw->aq.asq.bah, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.asq_mutex); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + + mutex_unlock(&hw->aq.asq_mutex); + + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + wr32(hw, hw->aq.arq.bal, 0); + wr32(hw, hw->aq.arq.bah, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.arq_mutex); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +/** + * i40e_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +i40e_status i40e_init_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code; + u16 eetrack_lo, eetrack_hi; + int retry = 0; + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.num_asq_entries == 0) || + (hw->aq.arq_buf_size == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + /* initialize locks */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* setup ASQ command write back timeout */ + hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code) + goto init_adminq_destroy_locks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code) + goto init_adminq_free_asq; + + /* There are some cases where the firmware may not be quite ready + * for AdminQ operations, so we retry the AdminQ setup a few times + * if we see timeouts in this first AQ call. 
+ */ + do { + ret_code = i40e_aq_get_firmware_version(hw, + &hw->aq.fw_maj_ver, + &hw->aq.fw_min_ver, + &hw->aq.fw_build, + &hw->aq.api_maj_ver, + &hw->aq.api_min_ver, + NULL); + if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) + break; + retry++; + msleep(100); + i40e_resume_aq(hw); + } while (retry < 10); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_arq; + + /* get the NVM version info */ + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); + hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + + if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { + ret_code = I40E_ERR_FIRMWARE_API_VERSION; + goto init_adminq_free_arq; + } + + /* pre-emptive resource lock release */ + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + hw->aq.nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + + ret_code = i40e_aq_set_hmc_resource_profile(hw, + I40E_HMC_PROFILE_DEFAULT, + 0, + NULL); + ret_code = 0; + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_arq: + i40e_shutdown_arq(hw); +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_locks: + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (i40e_check_asq_alive(hw)) + i40e_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + + /* destroy the locks */ + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +static u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + while (rd32(hw, hw->aq.asq.head) != ntc) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "%s: ntc %d head %d.\n", __func__, ntc, + rd32(hw, hw->aq.asq.head)); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + desc_cb = *desc; + cb_func(hw, &desc_cb); + } + memset(desc, 0, sizeof(*desc)); + memset(details, 0, sizeof(*details)); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40e_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
+ **/ +static bool i40e_asq_done(struct i40e_hw *hw) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40e_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. It runs the queue, cleans the queue, etc + **/ +i40e_status i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status = 0; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + u32 val = 0; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: head overrun at %d\n", val); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + *details = *cmd_details; + + /* If the cmd_details are defined copy the cookie. The + * cpu_to_le32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + cpu_to_le32(upper_32_bits(details->cookie)); + desc->cookie_low = + cpu_to_le32(lower_32_bits(details->cookie)); + } + } else { + memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~cpu_to_le16(details->flags_dis); + desc->flags |= cpu_to_le16(details->flags_ena); + + mutex_lock(&hw->aq.asq_mutex); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + *desc_on_ring = *desc; + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + memcpy(dma_buff->va, buff, buff_size); + 
desc_on_ring->datalen = cpu_to_le16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + cpu_to_le32(upper_32_bits(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + cpu_to_le32(lower_32_bits(dma_buff->pa)); + } + + /* bump the tail */ + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40e_asq_done(hw)) + break; + usleep_range(1000, 2000); + total_delay++; + } while (total_delay < hw->aq.asq_cmd_timeout); + } + + /* if ready, copy the desc back to temp */ + if (i40e_asq_done(hw)) { + *desc = *desc_on_ring; + if (buff != NULL) + memcpy(buff, dma_buff->va, buff_size); + retval = le16_to_cpu(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = 0; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: desc and buffer writeback:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + +asq_send_command_error: + mutex_unlock(&hw->aq.asq_mutex); +asq_send_command_exit: + return status; +} + +/** + * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); +} + +/** + * i40e_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +i40e_status i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + i40e_status ret_code = 0; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* take the lock before we start messing with the ring */ + mutex_lock(&hw->aq.arq_mutex); + + /* set next_to_use to head */ + ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + + flags = le16_to_cpu(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } + + e->desc = *desc; + datalen = le16_to_cpu(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) + memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len); + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->datalen = cpu_to_le16((u16)bi->size); + desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + mutex_unlock(&hw->aq.arq_mutex); + + if (i40e_is_nvm_update_op(&e->desc)) { + if (hw->aq.nvm_release_on_done) { + i40e_release_nvm(hw); + hw->aq.nvm_release_on_done = false; + } + } + + return ret_code; +} + +static void i40e_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h new file mode 100644 index 000000000..28e519a50 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -0,0 +1,157 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_status.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + bool nvm_release_on_done; + + struct mutex asq_mutex; /* Send queue lock */ + struct mutex arq_mutex; /* Receive queue lock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/** + * i40e_aq_rc_to_posix - convert errors to user-land codes + * aq_rc: AdminQ error code to convert + **/ +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +{ + int aq_to_posix[] = { + 0, /* I40E_AQ_RC_OK */ + -EPERM, /* I40E_AQ_RC_EPERM */ + -ENOENT, /* I40E_AQ_RC_ENOENT */ + -ESRCH, /* I40E_AQ_RC_ESRCH */ + -EINTR, /* I40E_AQ_RC_EINTR */ + -EIO, /* I40E_AQ_RC_EIO */ + -ENXIO, /* I40E_AQ_RC_ENXIO */ + -E2BIG, /* 
I40E_AQ_RC_E2BIG */ + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ + -EACCES, /* I40E_AQ_RC_EACCES */ + -EFAULT, /* I40E_AQ_RC_EFAULT */ + -EBUSY, /* I40E_AQ_RC_EBUSY */ + -EEXIST, /* I40E_AQ_RC_EEXIST */ + -EINVAL, /* I40E_AQ_RC_EINVAL */ + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ + -ERANGE, /* I40E_AQ_RC_ERANGE */ + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ + -EROFS, /* I40E_AQ_RC_EMODE */ + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; + return aq_to_posix[aq_rc]; +} + +/* general information */ +#define I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ + +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h new file mode 100644 index 000000000..929e3d72a --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -0,0 +1,2337 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. 
+ */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + 
i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 
0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, + i40e_aqc_opc_lldp_set_local_mib = 0x0A08, + i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_ISCSI 0x0022 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + 
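[Editorial aside — not part of the imported patch. The command structures defined in this header overlay desc.params.raw and are driven through the Admin Send Queue path added earlier in this patch (i40e_fill_default_direct_cmd_desc() and i40e_asq_send_command()). A minimal, hypothetical usage sketch for a direct command, assuming only a valid struct i40e_hw *hw and the declarations this patch introduces:

	struct i40e_aq_desc desc;
	i40e_status status;

	/* prepare a direct-command descriptor, e.g. Get Version (opcode 0x0001) */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);

	/* direct command: no indirect buffer, so buff = NULL and buff_size = 0 */
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (!status) {
		/* for direct commands the response overlays the descriptor params */
		struct i40e_aqc_get_version *resp =
			(struct i40e_aqc_get_version *)&desc.params.raw;
		/* fields such as resp->fw_major are little-endian; read with le16_to_cpu() */
	}

On failure, the firmware return code is latched in hw->aq.asq_last_status, as implemented in i40e_asq_send_command() above. End of editorial aside.]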
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; + u8 reserved[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define 
I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and 
data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define 
I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* 
flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define 
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_add_remove_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define 
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data); + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 
reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, + i40e_aqc_configure_switching_comp_ets_bw_limit_data); + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); + +/* Query Switching Component Bandwidth Allocation per Traffic Type + * (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 
0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_MAX +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + 
__le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define 
I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + __le16 element_id_msw; /* MSWord of field ID */ + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +/* Used for 0x0704 as well as for 0x0705 commands */ +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ + (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) +#define 
I40E_AQ_ANVM_FEATURE 0 +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +struct i40e_aqc_nvm_config_data_feature { + __le16 feature_id; +#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 +#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 +#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 + __le16 feature_options; + __le16 feature_selection; +}; + +I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); + +struct i40e_aqc_nvm_config_data_immediate_field { + __le32 field_id; + __le32 field_value; + __le16 field_options; + __le16 reserved; +}; + +I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add 
LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Get CEE DCBX Oper Config (0x0A07) + * uses the generic descriptor struct + * returns below as indirect response + */ + +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +struct i40e_aqc_get_cee_dcb_cfg_v1_resp { + u8 reserved1; + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 reserved2; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + u8 reserved3; + __le16 oper_app_prio; + u8 reserved4; + __le16 tlv_status; +}; + +I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); + +struct i40e_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) + __le32 tlv_status; +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) + u8 reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. 
DCBx + */ +struct i40e_aqc_lldp_set_local_mib { +#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) + u8 type; + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. DCBx + */ +struct i40e_aqc_lldp_stop_start_specific_agent { +#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 +#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ + (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + u8 command; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 + u8 reserved1[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + __le16 param_value2; + u8 reserved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* Initialize OCSD (0xFE02, direct) */ +struct i40e_aqc_opc_oem_ocsd_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocsd_memory_block_addr_high; + __le32 ocsd_memory_block_addr_low; + __le32 requested_update_interval; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); + +/* Initialize OCBB (0xFE03, direct) */ +struct i40e_aqc_opc_oem_ocbb_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocbb_memory_block_addr_high; + __le32 ocbb_memory_block_addr_low; + u8 
reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_alloc.h new file mode 100644 index 000000000..926811ad4 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -0,0 +1,58 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ALLOC_H_ +#define _I40E_ALLOC_H_ + +struct i40e_hw; + +/* Memory allocation types */ +enum i40e_memory_type { + i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ + i40e_mem_asq_buf = 1, + i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ + i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ + i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ + i40e_mem_pd = 5, /* Page Descriptor */ + i40e_mem_bp = 6, /* Backing Page - 4KB */ + i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + i40e_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +i40e_status i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +i40e_status i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); + +#endif /* _I40E_ALLOC_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c new file mode 100644 index 000000000..0bae22da0 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -0,0 +1,3644 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_type.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_virtchnl.h" + +/** + * i40e_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
+ **/ +static i40e_status i40e_set_mac_type(struct i40e_hw *hw) +{ + i40e_status status = 0; + + if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { + switch (hw->device_id) { + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_A: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: + hw->mac.type = I40E_MAC_XL710; + break; + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: + hw->mac.type = I40E_MAC_VF; + break; + default: + hw->mac.type = I40E_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); + return status; +} + +/** + * i40e_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); + u8 *buf = (u8 *)buffer; + u16 i = 0; + + if ((!(mask & hw->debug_mask)) || (desc == NULL)) + return; + + i40e_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); + i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); + i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); + i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); + + if ((buffer != NULL) && (aq_desc->datalen != 0)) { + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); + } + } +} + +/** + * i40e_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40e_check_asq_alive(struct i40e_hw *hw) +{ + if (hw->aq.asq.len) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); + else + return false; +} + +/** + * i40e_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
+ **/ +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, 
IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + 
I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> 
MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + 
I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + +/** + * i40e_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This assigns the MAC type and PHY code and inits the NVM. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The i40e_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +i40e_status i40e_init_shared_code(struct i40e_hw *hw) +{ + i40e_status status = 0; + u32 port, ari, func_rid; + + i40e_set_mac_type(hw); + + switch (hw->mac.type) { + case I40E_MAC_XL710: + break; + default: + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->phy.get_link_info = true; + + /* Determine port number and PF number*/ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); + else + hw->pf_id = (u8)(func_rid & 0x7); + + status = i40e_init_nvm(hw); + return status; +} + +/** + * i40e_aq_mac_address_read - Retrieve the MAC addresses + * @hw: pointer to the hw struct + * @flags: a return indicator of what addresses were added to the addr store + * @addrs: the requestor's mac addr store + * @cmd_details: pointer to command details structure or NULL + **/ +static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, + u16 *flags, + struct i40e_aqc_mac_address_read_data *addrs, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_read *cmd_data = + (struct i40e_aqc_mac_address_read *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, addrs, + sizeof(*addrs), cmd_details); + *flags = le16_to_cpu(cmd_data->command_flags); + + return status; +} + +/** + * i40e_aq_mac_address_write - Change the MAC addresses + * @hw: pointer to the hw struct + * @flags: indicates which 
MAC to be written + * @mac_addr: address to write + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_write *cmd_data = + (struct i40e_aqc_mac_address_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_mac_address_write); + cmd_data->command_flags = cpu_to_le16(flags); + cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); + cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | + ((u32)mac_addr[3] << 16) | + ((u32)mac_addr[4] << 8) | + mac_addr[5]); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_mac_addr - get MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to MAC address + * + * Reads the adapter's MAC address from register + **/ +i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + i40e_status status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + + if (flags & I40E_AQC_LAN_ADDR_VALID) + memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); + + return status; +} + +/** + * i40e_get_port_mac_addr - get Port MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to Port MAC address + * + * Reads the adapter's Port MAC address + **/ +i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + i40e_status status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_PORT_ADDR_VALID) + memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac)); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * i40e_pre_tx_queue_cfg - pre tx queue configure + * @hw: pointer to the HW structure + * @queue: target PF queue index + * @enable: state change request + * + * Handles hw requirement to indicate intention to enable + * or disable target queue. 
+ **/ +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) +{ + u32 abs_queue_idx = hw->func_caps.base_queue + queue; + u32 reg_block = 0; + u32 reg_val; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + + if (enable) + reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; + else + reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); +} +#ifdef I40E_FCOE + +/** + * i40e_get_san_mac_addr - get SAN MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to SAN MAC address + * + * Reads the adapter's SAN MAC address from NVM + **/ +i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + i40e_status status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_SAN_ADDR_VALID) + memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac)); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} +#endif + +/** + * i40e_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + i40e_status status = 0; + u16 pba_word = 0; + u16 pba_size = 0; + u16 pba_ptr = 0; + u16 i = 0; + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); + if (status || (pba_word != 0xFAFA)) { + hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); + if (status) { + hw_dbg(hw, "Failed to read PBA Block pointer.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); + if (status) { + hw_dbg(hw, "Failed to read PBA Block size.\n"); + return status; + } + + /* Subtract one to get PBA word count (PBA Size word is included in + * total size) + */ + pba_size--; + if (pba_num_size < (((u32)pba_size * 2) + 1)) { + hw_dbg(hw, "Buffer to small for PBA data.\n"); + return I40E_ERR_PARAM; + } + + for (i = 0; i < pba_size; i++) { + status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); + if (status) { + hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); + return status; + } + + pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; + pba_num[(i * 2) + 1] = pba_word & 0xFF; + } + pba_num[(pba_size * 2)] = '\0'; + + return status; +} + +/** + * i40e_get_media_type - Gets media type + * @hw: pointer to the hardware structure + **/ +static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) +{ + enum i40e_media_type media; + + switch (hw->phy.link_info.phy_type) { + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: + case I40E_PHY_TYPE_40GBASE_SR4: + case I40E_PHY_TYPE_40GBASE_LR4: + media = I40E_MEDIA_TYPE_FIBER; + break; + case I40E_PHY_TYPE_100BASE_TX: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_10GBASE_T: + media = I40E_MEDIA_TYPE_BASET; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + 
case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: + media = I40E_MEDIA_TYPE_DA; + break; + case I40E_PHY_TYPE_1000BASE_KX: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + media = I40E_MEDIA_TYPE_BACKPLANE; + break; + case I40E_PHY_TYPE_SGMII: + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + default: + media = I40E_MEDIA_TYPE_UNKNOWN; + break; + } + + return media; +} + +#define I40E_PF_RESET_WAIT_COUNT_A0 200 +#define I40E_PF_RESET_WAIT_COUNT 200 +/** + * i40e_pf_reset - Reset the PF + * @hw: pointer to the hardware structure + * + * Assuming someone else has triggered a global reset, + * assure the global reset is complete and then reset the PF + **/ +i40e_status i40e_pf_reset(struct i40e_hw *hw) +{ + u32 cnt = 0; + u32 cnt1 = 0; + u32 reg = 0; + u32 grst_del; + + /* Poll for Global Reset steady state in case of recent GRST. + * The grst delay value is in 100ms units, and we'll wait a + * couple counts longer to be sure we don't just miss the end. + */ + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + for (cnt = 0; cnt < grst_del + 2; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + break; + msleep(100); + } + if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + hw_dbg(hw, "Global reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + + /* Now Wait for the FW to be ready */ + for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { + reg = rd32(hw, I40E_GLNVM_ULD); + reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); + if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { + hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); + break; + } + usleep_range(10000, 20000); + } + if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { + hw_dbg(hw, "wait for FW Reset complete timedout\n"); + hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); + return I40E_ERR_RESET_FAILED; + } + + /* If there was a Global Reset in progress when we got here, + * we don't need to do the PF Reset + */ + if (!cnt) { + if (hw->revision_id == 0) + cnt = I40E_PF_RESET_WAIT_COUNT_A0; + else + cnt = I40E_PF_RESET_WAIT_COUNT; + reg = rd32(hw, I40E_PFGEN_CTRL); + wr32(hw, I40E_PFGEN_CTRL, + (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); + for (; cnt; cnt--) { + reg = rd32(hw, I40E_PFGEN_CTRL); + if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) + break; + usleep_range(1000, 2000); + } + if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { + hw_dbg(hw, "PF reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + } + + i40e_clear_pxe_mode(hw); + + return 0; +} + +/** + * i40e_clear_hw - clear out any left over hw state + * @hw: pointer to the hw struct + * + * Clear queues and interrupts, typically called at init time, + * but after the capabilities have been found so we know how many + * queues and msix vectors have been allocated. 
+ **/ +void i40e_clear_hw(struct i40e_hw *hw) +{ + u32 num_queues, base_queue; + u32 num_pf_int; + u32 num_vf_int; + u32 num_vfs; + u32 i, j; + u32 val; + u32 eol = 0x7ff; + + /* get number of interrupts, queues, and VFs */ + val = rd32(hw, I40E_GLPCI_CNF2); + num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; + num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; + + val = rd32(hw, I40E_PFLAN_QALLOC); + base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> + I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; + j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> + I40E_PFLAN_QALLOC_LASTQ_SHIFT; + if (val & I40E_PFLAN_QALLOC_VALID_MASK) + num_queues = (j - base_queue) + 1; + else + num_queues = 0; + + val = rd32(hw, I40E_PF_VT_PFALLOC); + i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> + I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; + j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> + I40E_PF_VT_PFALLOC_LASTVF_SHIFT; + if (val & I40E_PF_VT_PFALLOC_VALID_MASK) + num_vfs = (j - i) + 1; + else + num_vfs = 0; + + /* stop all the interrupts */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_DYN_CTLN(i), val); + + /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ + val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLST0, val); + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_LNKLSTN(i), val); + val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; + for (i = 0; i < num_vfs; i++) + wr32(hw, I40E_VPINT_LNKLST0(i), val); + for (i = 0; i < num_vf_int - 2; i++) + wr32(hw, I40E_VPINT_LNKLSTN(i), val); + + /* warn the HW of the coming Tx disables */ + for (i = 0; i < num_queues; i++) { + u32 abs_queue_idx = base_queue + i; + u32 reg_block = 0; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); + } + udelay(400); + + /* stop all the queues */ + for (i = 0; i < num_queues; i++) { + wr32(hw, I40E_QINT_TQCTL(i), 0); + wr32(hw, I40E_QTX_ENA(i), 0); + wr32(hw, I40E_QINT_RQCTL(i), 0); + wr32(hw, I40E_QRX_ENA(i), 0); + } + + /* short wait for all queue disables to settle */ + udelay(50); +} + +/** + * i40e_clear_pxe_mode - clear pxe operations mode + * @hw: pointer to the hw struct + * + * Make sure all PXE mode settings are cleared, including things + * like descriptor fetch/write-back mode. 
+ **/ +void i40e_clear_pxe_mode(struct i40e_hw *hw) +{ + u32 reg; + + if (i40e_check_asq_alive(hw)) + i40e_aq_clear_pxe_mode(hw, NULL); + + /* Clear single descriptor fetch/write-back mode */ + reg = rd32(hw, I40E_GLLAN_RCTL_0); + + if (hw->revision_id == 0) { + /* As a work around clear PXE_MODE instead of setting it */ + wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); + } else { + wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); + } +} + +/** + * i40e_led_is_mine - helper to find matching led + * @hw: pointer to the hw struct + * @idx: index into GPIO registers + * + * returns: 0 if no match, otherwise the value of the GPIO_CTL register + */ +static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) +{ + u32 gpio_val = 0; + u32 port; + + if (!hw->func_caps.led[idx]) + return 0; + + gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); + port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> + I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + + /* if PRT_NUM_NA is 1 then this LED is not port specific, OR + * if it is not our port then ignore + */ + if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || + (port != hw->port)) + return 0; + + return gpio_val; +} + +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE +#define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_LED0 22 + +/** + * i40e_led_get - return current on/off mode + * @hw: pointer to the hw struct + * + * The value returned is the 'mode' field as defined in the + * GPIO register definitions: 0x0 = off, 0xf = on, and other + * values are variations of possible behaviors relating to + * blink, link, and wire. + **/ +u32 i40e_led_get(struct i40e_hw *hw) +{ + u32 current_mode = 0; + u32 mode = 0; + int i; + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + break; + } + + return mode; +} + +/** + * i40e_led_set - set new on/off mode + * @hw: pointer to the hw struct + * @mode: 0=off, 0xf=on (else see manual for mode details) + * @blink: true if the LED should blink when on, false if steady + * + * if this function is used to turn on the blink it should + * be used to disable the blink when restoring the original state. 
+ **/ +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) +{ + u32 current_mode = 0; + int i; + + if (mode & 0xfffffff0) + hw_dbg(hw, "invalid mode passed in %X\n", mode); + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; + /* this & is a bit of paranoia, but serves as a range check */ + gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & + I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + + if (mode == I40E_LINK_ACTIVITY) + blink = false; + + if (blink) + gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + else + gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + + wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); + break; + } +} + +/* Admin command wrappers */ + +/** + * i40e_aq_get_phy_capabilities + * @hw: pointer to the hw struct + * @abilities: structure for PHY capabilities to be filled + * @qualified_modules: report Qualified Modules + * @report_init: report init capabilities (active are default) + * @cmd_details: pointer to command details structure or NULL + * + * Returns the various PHY abilities supported on the Port. + **/ +i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); + + if (!abilities) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_abilities); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (abilities_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + if (qualified_modules) + desc.params.external.param0 |= + cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); + + if (report_init) + desc.params.external.param0 |= + cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); + + status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, + cmd_details); + + if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) + status = I40E_ERR_UNKNOWN_PHY; + + return status; +} + +/** + * i40e_aq_set_phy_config + * @hw: pointer to the hw struct + * @config: structure with PHY configuration to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set the various PHY configuration parameters + * supported on the Port.One or more of the Set PHY config parameters may be + * ignored in an MFP mode as the PF may not have the privilege to set some + * of the PHY Config parameters. This status will be indicated by the + * command response. 
+ **/ +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_set_phy_config *cmd = + (struct i40e_aq_set_phy_config *)&desc.params.raw; + enum i40e_status_code status; + + if (!config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_config); + + *cmd = *config; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * + * Set the requested flow control mode using set_phy_config. + **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + u8 pause_mask = 0x0; + + *aq_failures = 0x0; + + switch (fc_mode) { + case I40E_FC_FULL: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_RX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_TX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + break; + default: + break; + } + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; + return status; + } + + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + /* clear the old pause settings */ + config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities != abilities.abilities) { + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities.phy_type; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + status = i40e_aq_set_phy_config(hw, &config, NULL); + + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; + } + /* Update the link info */ + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + if (status) { + /* Wait a little bit (on 40G cards it sometimes takes a really + * long time for link to come back from the atomic reset) + * and try once more + */ + msleep(1000); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + } + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; + + return status; +} + +/** + * i40e_aq_clear_pxe_mode + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Tell the firmware that the driver is taking over from PXE + **/ +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_clear_pxe *cmd = + (struct i40e_aqc_clear_pxe *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_clear_pxe_mode); + + cmd->rx_cnt = 0x2; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + wr32(hw, I40E_GLLAN_RCTL_0, 0x1); + + return status; +} + +/** + * i40e_aq_set_link_restart_an + * @hw: pointer to the hw struct + * 
@enable_link: if true: enable link, if false: disable link + * @cmd_details: pointer to command details structure or NULL + * + * Sets up the link and restarts the Auto-Negotiation over the link. + **/ +i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_link_restart_an *cmd = + (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_link_restart_an); + + cmd->command = I40E_AQ_PHY_RESTART_AN; + if (enable_link) + cmd->command |= I40E_AQ_PHY_LINK_ENABLE; + else + cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_link_info + * @hw: pointer to the hw struct + * @enable_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * @cmd_details: pointer to command details structure or NULL + * + * Returns the link status of the adapter. + **/ +i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_link_status *resp = + (struct i40e_aqc_get_link_status *)&desc.params.raw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + i40e_status status; + bool tx_pause, rx_pause; + u16 command_flags; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); + + if (enable_lse) + command_flags = I40E_AQ_LSE_ENABLE; + else + command_flags = I40E_AQ_LSE_DISABLE; + resp->command_flags = cpu_to_le16(command_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status) + goto aq_get_link_info_exit; + + /* save off old link status information */ + hw->phy.link_info_old = *hw_link_info; + + /* update link status */ + hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; + hw->phy.media_type = i40e_get_media_type(hw); + hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; + hw_link_info->link_info = resp->link_info; + hw_link_info->an_info = resp->an_info; + hw_link_info->ext_info = resp->ext_info; + hw_link_info->loopback = resp->loopback; + hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); + hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; + + /* update fc info */ + tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); + rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); + if (tx_pause & rx_pause) + hw->fc.current_mode = I40E_FC_FULL; + else if (tx_pause) + hw->fc.current_mode = I40E_FC_TX_PAUSE; + else if (rx_pause) + hw->fc.current_mode = I40E_FC_RX_PAUSE; + else + hw->fc.current_mode = I40E_FC_NONE; + + if (resp->config & I40E_AQ_CONFIG_CRC_ENA) + hw_link_info->crc_enable = true; + else + hw_link_info->crc_enable = false; + + if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE)) + hw_link_info->lse_enable = true; + else + hw_link_info->lse_enable = false; + + if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + + /* save link status information */ + if (link) + *link = *hw_link_info; + + /* flag cleared so helper functions don't call AQ again */ + hw->phy.get_link_info = false; + +aq_get_link_info_exit: + return status; +} + +/** + * i40e_aq_set_phy_int_mask + * 
@hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. + **/ +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = cpu_to_le16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Add a VSI context to the hardware. +**/ +i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_vsi); + + cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); + cmd->connection_type = vsi_ctx->connection_type; + cmd->vf_id = vsi_ctx->vf_num; + cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + if (status) + goto aq_add_vsi_exit; + + vsi_ctx->seid = le16_to_cpu(resp->seid); + vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); + vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + +aq_add_vsi_exit: + return status; +} + +/** + * i40e_aq_set_vsi_unicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set unicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_multicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set multicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + 
i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_broadcast + * @hw: pointer to the hw struct + * @seid: vsi number + * @set_filter: true to set filter, false to clear filter + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear the broadcast promiscuous flag (filter) for a given VSI. + **/ +i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 seid, bool set_filter, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set_filter) + cmd->promiscuous_flags + |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + else + cmd->promiscuous_flags + &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_vsi_params - get VSI configuration info + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_vsi_parameters); + + cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), NULL); + + if (status) + goto aq_get_vsi_params_exit; + + vsi_ctx->seid = le16_to_cpu(resp->seid); + vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); + vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + +aq_get_vsi_params_exit: + return status; +} + +/** + * i40e_aq_update_vsi_params + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Update a VSI context. 
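+ *
+ * A hedged read-modify-write sketch (the context setup and the fields
+ * being edited are assumptions for illustration):
+ *
+ *	struct i40e_vsi_context ctx = {};
+ *	ctx.seid = vsi_seid;
+ *	if (!i40e_aq_get_vsi_params(hw, &ctx, NULL)) {
+ *		// edit the desired fields in ctx.info, then write back
+ *		i40e_aq_update_vsi_params(hw, &ctx, NULL);
+ *	}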
+ **/ +i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_update_vsi_parameters); + cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + return status; +} + +/** + * i40e_aq_get_switch_config + * @hw: pointer to the hardware structure + * @buf: pointer to the result buffer + * @buf_size: length of input buffer + * @start_seid: seid to start for the report, 0 == beginning + * @cmd_details: pointer to command details structure or NULL + * + * Fill the buf with switch configuration returned from AdminQ command + **/ +i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *scfg = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_config); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + scfg->seid = cpu_to_le16(*start_seid); + + status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); + *start_seid = le16_to_cpu(scfg->seid); + + return status; +} + +/** + * i40e_aq_get_firmware_version + * @hw: pointer to the hw struct + * @fw_major_version: firmware major version + * @fw_minor_version: firmware minor version + * @fw_build: firmware build number + * @api_major_version: major queue version + * @api_minor_version: minor queue version + * @cmd_details: pointer to command details structure or NULL + * + * Get the firmware version from the admin queue commands + **/ +i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_version *resp = + (struct i40e_aqc_get_version *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (fw_major_version) + *fw_major_version = le16_to_cpu(resp->fw_major); + if (fw_minor_version) + *fw_minor_version = le16_to_cpu(resp->fw_minor); + if (fw_build) + *fw_build = le32_to_cpu(resp->fw_build); + if (api_major_version) + *api_major_version = le16_to_cpu(resp->api_major); + if (api_minor_version) + *api_minor_version = le16_to_cpu(resp->api_minor); + } + + return status; +} + +/** + * i40e_aq_send_driver_version + * @hw: pointer to the hw struct + * @dv: driver's major, minor version + * @cmd_details: pointer to command details structure or NULL + * + * Send the driver version to the firmware + **/ +i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_driver_version *cmd = + (struct i40e_aqc_driver_version *)&desc.params.raw; + 
i40e_status status; + u16 len; + + if (dv == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); + + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + cmd->driver_major_ver = dv->major_version; + cmd->driver_minor_ver = dv->minor_version; + cmd->driver_build_ver = dv->build_version; + cmd->driver_subbuild_ver = dv->subbuild_version; + + len = 0; + while (len < sizeof(dv->driver_string) && + (dv->driver_string[len] < 0x80) && + dv->driver_string[len]) + len++; + status = i40e_asq_send_command(hw, &desc, dv->driver_string, + len, cmd_details); + + return status; +} + +/** + * i40e_get_link_status - get status of the HW network link + * @hw: pointer to the hw struct + * + * Returns true if link is up, false if link is down. + * + * Side effect: LinkStatusEvent reporting becomes enabled + **/ +bool i40e_get_link_status(struct i40e_hw *hw) +{ + i40e_status status = 0; + bool link_status = false; + + if (hw->phy.get_link_info) { + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + + if (status) + goto i40e_get_link_status_exit; + } + + link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + +i40e_get_link_status_exit: + return link_status; +} + +/** + * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC + * @hw: pointer to the hw struct + * @uplink_seid: the MAC or other gizmo SEID + * @downlink_seid: the VSI SEID + * @enabled_tc: bitmap of TCs to be enabled + * @default_port: true for default port VSI, false for control port + * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support + * @veb_seid: pointer to where to put the resulting VEB SEID + * @cmd_details: pointer to command details structure or NULL + * + * This asks the FW to add a VEB between the uplink and downlink + * elements. If the uplink SEID is 0, this will be a floating VEB. 
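+ *
+ * A short sketch of the two call shapes (the SEIDs and the TC bitmap are
+ * assumed example values):
+ *
+ *	u16 veb_seid;
+ *	// VEB between a MAC uplink and a VSI downlink
+ *	i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1, true, false,
+ *			&veb_seid, NULL);
+ *	// floating VEB: both SEIDs are 0
+ *	i40e_aq_add_veb(hw, 0, 0, 0x1, false, false, &veb_seid, NULL);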
+ **/ +i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, bool enable_l2_filtering, + u16 *veb_seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_veb *cmd = + (struct i40e_aqc_add_veb *)&desc.params.raw; + struct i40e_aqc_add_veb_completion *resp = + (struct i40e_aqc_add_veb_completion *)&desc.params.raw; + i40e_status status; + u16 veb_flags = 0; + + /* SEIDs need to either both be set or both be 0 for floating VEB */ + if (!!uplink_seid != !!downlink_seid) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); + + cmd->uplink_seid = cpu_to_le16(uplink_seid); + cmd->downlink_seid = cpu_to_le16(downlink_seid); + cmd->enable_tcs = enabled_tc; + if (!uplink_seid) + veb_flags |= I40E_AQC_ADD_VEB_FLOATING; + if (default_port) + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; + else + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; + + if (enable_l2_filtering) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; + + cmd->veb_flags = cpu_to_le16(veb_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && veb_seid) + *veb_seid = le16_to_cpu(resp->veb_seid); + + return status; +} + +/** + * i40e_aq_get_veb_parameters - Retrieve VEB parameters + * @hw: pointer to the hw struct + * @veb_seid: the SEID of the VEB to query + * @switch_id: the uplink switch id + * @floating: set to true if the VEB is floating + * @statistic_index: index of the stats counter block for this VEB + * @vebs_used: number of VEB's used by function + * @vebs_free: total VEB's not reserved by any function + * @cmd_details: pointer to command details structure or NULL + * + * This retrieves the parameters for a particular VEB, specified by + * uplink_seid, and returns them to the caller. 
+ **/ +i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, + bool *floating, u16 *statistic_index, + u16 *vebs_used, u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_veb_parameters_completion *cmd_resp = + (struct i40e_aqc_get_veb_parameters_completion *) + &desc.params.raw; + i40e_status status; + + if (veb_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_veb_parameters); + cmd_resp->seid = cpu_to_le16(veb_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status) + goto get_veb_exit; + + if (switch_id) + *switch_id = le16_to_cpu(cmd_resp->switch_id); + if (statistic_index) + *statistic_index = le16_to_cpu(cmd_resp->statistic_index); + if (vebs_used) + *vebs_used = le16_to_cpu(cmd_resp->vebs_used); + if (vebs_free) + *vebs_free = le16_to_cpu(cmd_resp->vebs_free); + if (floating) { + u16 flags = le16_to_cpu(cmd_resp->veb_flags); + if (flags & I40E_AQC_ADD_VEB_FLOATING) + *floating = true; + else + *floating = false; + } + +get_veb_exit: + return status; +} + +/** + * i40e_aq_add_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be added + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Add MAC/VLAN addresses to the HW filtering + **/ +i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); + cmd->num_addresses = cpu_to_le16(count); + cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_remove_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be removed + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Remove MAC/VLAN addresses from the HW filtering + **/ +i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); + cmd->num_addresses = cpu_to_le16(count); + cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= 
cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: VF id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * send msg to vf + **/ +i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_pf_vf_message *cmd = + (struct i40e_aqc_pf_vf_message *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); + cmd->id = cpu_to_le32(vfid); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); + + return status; +} + +/** + * i40e_aq_debug_read_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the register using the admin queue commands + **/ +i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd_resp = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (reg_val == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | + (u64)le32_to_cpu(cmd_resp->value_low); + } + + return status; +} + +/** + * i40e_aq_debug_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Write to a register using the admin queue commands + **/ +i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); + cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_hmc_resource_profile + * @hw: pointer to the hw struct + * @profile: type of profile the HMC is to be set as + * @pe_vf_enabled_count: the number of PE enabled VFs the system has + * @cmd_details: pointer to command details structure or NULL + * + * set the HMC profile of the device. 
+ **/ +i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile profile, + u8 pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_get_set_hmc_resource_profile *cmd = + (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_hmc_resource_profile); + + cmd->pm_profile = (u8)profile; + cmd->pe_vf_enabled = pe_vf_enabled_count; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_request_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * @cmd_details: pointer to command details structure or NULL + * + * requests common resource using the admin queue commands + **/ +i40e_status i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd_resp = + (struct i40e_aqc_request_resource *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); + + cmd_resp->resource_id = cpu_to_le16(resource); + cmd_resp->access_type = cpu_to_le16(access); + cmd_resp->resource_number = cpu_to_le32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + /* The completion specifies the maximum time in ms that the driver + * may hold the resource in the Timeout field. + * If the resource is held by someone else, the command completes with + * busy return value and the timeout field indicates the maximum time + * the current owner of the resource has to free it. 
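+ * A caller-side sketch of that protocol (the resource id, access type and
+ * the retry policy are assumptions; the caller decides how long to back off):
+ *
+ *	u64 timeout = 0;
+ *	status = i40e_aq_request_resource(hw, resource, access, 0,
+ *					  &timeout, NULL);
+ *	if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *		// current owner holds it; wait up to 'timeout' ms, then retry
+ *	...
+ *	i40e_aq_release_resource(hw, resource, 0, NULL);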
+ */ + if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) + *timeout = le32_to_cpu(cmd_resp->timeout); + + return status; +} + +/** + * i40e_aq_release_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @sdp_number: resource number + * @cmd_details: pointer to command details structure or NULL + * + * release common resource using the admin queue commands + **/ +i40e_status i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd = + (struct i40e_aqc_request_resource *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); + + cmd->resource_id = cpu_to_le16(resource); + cmd->resource_number = cpu_to_le32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_read_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Read the NVM using the admin queue commands + **/ +i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + i40e_status status; + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_read_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_read_nvm_exit: + return status; +} + +/** + * i40e_aq_erase_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in the module (expressed in 4 KB from module's beginning) + * @length: length of the section to be erased (expressed in 4 KB) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Erase the NVM sector using the admin queue commands + **/ +i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + i40e_status status; + + /* In offset the highest byte must be zeroed. 
*/ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_erase_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_erase_nvm_exit: + return status; +} + +#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01 +#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02 +#define I40E_DEV_FUNC_CAP_NPAR 0x03 +#define I40E_DEV_FUNC_CAP_OS2BMC 0x04 +#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05 +#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12 +#define I40E_DEV_FUNC_CAP_VF 0x13 +#define I40E_DEV_FUNC_CAP_VMDQ 0x14 +#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15 +#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16 +#define I40E_DEV_FUNC_CAP_VSI 0x17 +#define I40E_DEV_FUNC_CAP_DCB 0x18 +#define I40E_DEV_FUNC_CAP_FCOE 0x21 +#define I40E_DEV_FUNC_CAP_ISCSI 0x22 +#define I40E_DEV_FUNC_CAP_RSS 0x40 +#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 +#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 +#define I40E_DEV_FUNC_CAP_MSIX 0x43 +#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 +#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 +#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 +#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 +#define I40E_DEV_FUNC_CAP_CEM 0xF2 +#define I40E_DEV_FUNC_CAP_IWARP 0x51 +#define I40E_DEV_FUNC_CAP_LED 0x61 +#define I40E_DEV_FUNC_CAP_SDP 0x62 +#define I40E_DEV_FUNC_CAP_MDIO 0x63 +#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 + +/** + * i40e_parse_discover_capabilities + * @hw: pointer to the hw struct + * @buff: pointer to a buffer containing device/function capability records + * @cap_count: number of capability records in the list + * @list_type_opc: type of capabilities list to parse + * + * Parse the device/function capabilities list. 
+ **/ +static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, + u32 cap_count, + enum i40e_admin_queue_opc list_type_opc) +{ + struct i40e_aqc_list_capabilities_element_resp *cap; + u32 valid_functions, num_functions; + u32 number, logical_id, phys_id; + struct i40e_hw_capabilities *p; + u32 i = 0; + u16 id; + + cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; + + if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) + p = &hw->dev_caps; + else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) + p = &hw->func_caps; + else + return; + + for (i = 0; i < cap_count; i++, cap++) { + id = le16_to_cpu(cap->id); + number = le32_to_cpu(cap->number); + logical_id = le32_to_cpu(cap->logical_id); + phys_id = le32_to_cpu(cap->phys_id); + + switch (id) { + case I40E_DEV_FUNC_CAP_SWITCH_MODE: + p->switch_mode = number; + break; + case I40E_DEV_FUNC_CAP_MGMT_MODE: + p->management_mode = number; + break; + case I40E_DEV_FUNC_CAP_NPAR: + p->npar_enable = number; + break; + case I40E_DEV_FUNC_CAP_OS2BMC: + p->os2bmc = number; + break; + case I40E_DEV_FUNC_CAP_VALID_FUNC: + p->valid_functions = number; + break; + case I40E_DEV_FUNC_CAP_SRIOV_1_1: + if (number == 1) + p->sr_iov_1_1 = true; + break; + case I40E_DEV_FUNC_CAP_VF: + p->num_vfs = number; + p->vf_base_id = logical_id; + break; + case I40E_DEV_FUNC_CAP_VMDQ: + if (number == 1) + p->vmdq = true; + break; + case I40E_DEV_FUNC_CAP_802_1_QBG: + if (number == 1) + p->evb_802_1_qbg = true; + break; + case I40E_DEV_FUNC_CAP_802_1_QBH: + if (number == 1) + p->evb_802_1_qbh = true; + break; + case I40E_DEV_FUNC_CAP_VSI: + p->num_vsis = number; + break; + case I40E_DEV_FUNC_CAP_DCB: + if (number == 1) { + p->dcb = true; + p->enabled_tcmap = logical_id; + p->maxtc = phys_id; + } + break; + case I40E_DEV_FUNC_CAP_FCOE: + if (number == 1) + p->fcoe = true; + break; + case I40E_DEV_FUNC_CAP_ISCSI: + if (number == 1) + p->iscsi = true; + break; + case I40E_DEV_FUNC_CAP_RSS: + p->rss = true; + p->rss_table_size = number; + p->rss_table_entry_width = logical_id; + break; + case I40E_DEV_FUNC_CAP_RX_QUEUES: + p->num_rx_qp = number; + p->base_queue = phys_id; + break; + case I40E_DEV_FUNC_CAP_TX_QUEUES: + p->num_tx_qp = number; + p->base_queue = phys_id; + break; + case I40E_DEV_FUNC_CAP_MSIX: + p->num_msix_vectors = number; + break; + case I40E_DEV_FUNC_CAP_MSIX_VF: + p->num_msix_vectors_vf = number; + break; + case I40E_DEV_FUNC_CAP_MFP_MODE_1: + if (number == 1) + p->mfp_mode_1 = true; + break; + case I40E_DEV_FUNC_CAP_CEM: + if (number == 1) + p->mgmt_cem = true; + break; + case I40E_DEV_FUNC_CAP_IWARP: + if (number == 1) + p->iwarp = true; + break; + case I40E_DEV_FUNC_CAP_LED: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->led[phys_id] = true; + break; + case I40E_DEV_FUNC_CAP_SDP: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->sdp[phys_id] = true; + break; + case I40E_DEV_FUNC_CAP_MDIO: + if (number == 1) { + p->mdio_port_num = phys_id; + p->mdio_port_mode = logical_id; + } + break; + case I40E_DEV_FUNC_CAP_IEEE_1588: + if (number == 1) + p->ieee_1588 = true; + break; + case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR: + p->fd = true; + p->fd_filters_guaranteed = number; + p->fd_filters_best_effort = logical_id; + break; + case I40E_DEV_FUNC_CAP_WR_CSR_PROT: + p->wr_csr_prot = (u64)number; + p->wr_csr_prot |= (u64)logical_id << 32; + break; + default: + break; + } + } + + if (p->fcoe) + i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); + + /* Software override ensuring FCoE is disabled if npar or mfp + * mode because it is 
not supported in these modes. + */ + if (p->npar_enable || p->mfp_mode_1) + p->fcoe = false; + + /* count the enabled ports (aka the "not disabled" ports) */ + hw->num_ports = 0; + for (i = 0; i < 4; i++) { + u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); + u64 port_cfg = 0; + + /* use AQ read to get the physical register offset instead + * of the port relative offset + */ + i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); + if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) + hw->num_ports++; + } + + valid_functions = p->valid_functions; + num_functions = 0; + while (valid_functions) { + if (valid_functions & 1) + num_functions++; + valid_functions >>= 1; + } + + /* partition id is 1-based, and functions are evenly spread + * across the ports as partitions + */ + hw->partition_id = (hw->pf_id / hw->num_ports) + 1; + hw->num_partitions = num_functions / hw->num_ports; + + /* additional HW specific goodies that might + * someday be HW version specific + */ + p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; +} + +/** + * i40e_aq_discover_capabilities + * @hw: pointer to the hw struct + * @buff: a virtual buffer to hold the capabilities + * @buff_size: Size of the virtual buffer + * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM + * @list_type_opc: capabilities type to discover - pass in the command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Get the device capabilities descriptions from the firmware + **/ +i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_list_capabilites *cmd; + struct i40e_aq_desc desc; + i40e_status status = 0; + + cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; + + if (list_type_opc != i40e_aqc_opc_list_func_capabilities && + list_type_opc != i40e_aqc_opc_list_dev_capabilities) { + status = I40E_ERR_PARAM; + goto exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + *data_size = le16_to_cpu(desc.datalen); + + if (status) + goto exit; + + i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), + list_type_opc); + +exit: + return status; +} + +/** + * i40e_aq_update_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Update the NVM using the admin queue commands + **/ +i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + i40e_status status; + + /* In offset the highest byte must be zeroed. 
*/ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_update_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_update_nvm_exit: + return status; +} + +/** + * i40e_aq_get_lldp_mib + * @hw: pointer to the hw struct + * @bridge_type: type of bridge requested + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @local_len : length of the returned Local LLDP MIB + * @remote_len: length of the returned Remote LLDP MIB + * @cmd_details: pointer to command details structure or NULL + * + * Requests the complete LLDP MIB (entire packet). + **/ +i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_get_mib *cmd = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + struct i40e_aqc_lldp_get_mib *resp = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); + /* Indirect Command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + + cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; + cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + + desc.datalen = cpu_to_le16(buff_size); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (local_len != NULL) + *local_len = le16_to_cpu(resp->local_len); + if (remote_len != NULL) + *remote_len = le16_to_cpu(resp->remote_len); + } + + return status; +} + +/** + * i40e_aq_cfg_lldp_mib_change_event + * @hw: pointer to the hw struct + * @enable_update: Enable or Disable event posting + * @cmd_details: pointer to command details structure or NULL + * + * Enable or Disable posting of an event on ARQ when LLDP MIB + * associated with the interface changes + **/ +i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_update_mib *cmd = + (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); + + if (!enable_update) + cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_stop_lldp + * @hw: pointer to the hw struct + * @shutdown_agent: True if LLDP Agent needs to be Shutdown + * @cmd_details: pointer to command details structure or NULL + * + * Stop or Shutdown the embedded LLDP Agent + **/ +i40e_status 
i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop *cmd = + (struct i40e_aqc_lldp_stop *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); + + if (shutdown_agent) + cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_start_lldp + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Start the embedded LLDP Agent on all ports. + **/ +i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_start *cmd = + (struct i40e_aqc_lldp_start *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); + + cmd->command = I40E_AQ_LLDP_AGENT_START; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_add_udp_tunnel + * @hw: pointer to the hw struct + * @udp_port: the UDP port to add + * @header_len: length of the tunneling header length in DWords + * @protocol_index: protocol index type + * @filter_index: pointer to filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_udp_tunnel *cmd = + (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; + struct i40e_aqc_del_udp_tunnel_completion *resp = + (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); + + cmd->udp_port = cpu_to_le16(udp_port); + cmd->protocol_type = protocol_index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && filter_index) + *filter_index = resp->index; + + return status; +} + +/** + * i40e_aq_del_udp_tunnel + * @hw: pointer to the hw struct + * @index: filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_udp_tunnel *cmd = + (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); + + cmd->index = index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return 
status; +} + +/** + * i40e_aq_delete_element - Delete switch element + * @hw: pointer to the hw struct + * @seid: the SEID to delete from the switch + * @cmd_details: pointer to command details structure or NULL + * + * This deletes a switch element from the switch. + **/ +i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *cmd = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + i40e_status status; + + if (seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); + + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_dcb_updated - DCB Updated Command + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * EMP will return when the shared RPB settings have been + * recomputed and modified. The retval field in the descriptor + * will be set to 0 when RPB is modified. + **/ +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler + * @hw: pointer to the hw struct + * @seid: seid for the physical port/switching component/vsi + * @buff: Indirect buffer to hold data parameters and response + * @buff_size: Indirect buffer size + * @opcode: Tx scheduler AQ command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Generic command handler for Tx scheduler AQ commands + **/ +static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, + void *buff, u16 buff_size, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_tx_sched_ind *cmd = + (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; + i40e_status status; + bool cmd_param_flag = false; + + switch (opcode) { + case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: + case i40e_aqc_opc_configure_vsi_tc_bw: + case i40e_aqc_opc_enable_switching_comp_ets: + case i40e_aqc_opc_modify_switching_comp_ets: + case i40e_aqc_opc_disable_switching_comp_ets: + case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: + case i40e_aqc_opc_configure_switching_comp_bw_config: + cmd_param_flag = true; + break; + case i40e_aqc_opc_query_vsi_bw_config: + case i40e_aqc_opc_query_vsi_ets_sla_config: + case i40e_aqc_opc_query_switching_comp_ets_config: + case i40e_aqc_opc_query_port_ets_config: + case i40e_aqc_opc_query_switching_comp_bw_config: + cmd_param_flag = false; + break; + default: + return I40E_ERR_PARAM; + } + + i40e_fill_default_direct_cmd_desc(&desc, opcode); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (cmd_param_flag) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(buff_size); + + cmd->vsi_seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit + * @hw: pointer to the hw struct + * @seid: VSI seid + * @credit: BW limit credits (0 = 
disabled) + * @max_credit: Max BW limit credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_configure_vsi_bw_limit *cmd = + (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_vsi_bw_limit); + + cmd->vsi_seid = cpu_to_le16(seid); + cmd->credit = cpu_to_le16(credit); + cmd->max_credit = max_credit; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC + * @hw: pointer to the hw struct + * @seid: VSI seid + * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_vsi_tc_bw, + cmd_details); +} + +/** + * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port + * @hw: pointer to the hw struct + * @seid: seid of the switching component connected to Physical Port + * @ets_data: Buffer holding ETS parameters + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, + sizeof(*ets_data), opcode, cmd_details); +} + +/** + * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_aq_query_vsi_bw_config - Query VSI BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_bw_config, + cmd_details); +} + +/** + * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration per TC + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + 
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_ets_sla_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's per TC BW config + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI or switching component connected to Physical Port + * @bw_data: Buffer to hold current ETS configuration for the Physical Port + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_port_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_validate_filter_settings + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Check and validate the filter control settings passed. + * The function checks for the valid filter/context sizes being + * passed for FCoE and PE. + * + * Returns 0 if the values passed are valid and within + * range else returns an error. 
+ **/ +static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + u32 fcoe_cntx_size, fcoe_filt_size; + u32 pe_cntx_size, pe_filt_size; + u32 fcoe_fmax; + u32 val; + + /* Validate FCoE settings passed */ + switch (settings->fcoe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + fcoe_filt_size <<= (u32)settings->fcoe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->fcoe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* Validate PE settings passed */ + switch (settings->pe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + case I40E_HASH_FILTER_SIZE_64K: + case I40E_HASH_FILTER_SIZE_128K: + case I40E_HASH_FILTER_SIZE_256K: + case I40E_HASH_FILTER_SIZE_512K: + case I40E_HASH_FILTER_SIZE_1M: + pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + pe_filt_size <<= (u32)settings->pe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->pe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + case I40E_DMA_CNTX_SIZE_8K: + case I40E_DMA_CNTX_SIZE_16K: + case I40E_DMA_CNTX_SIZE_32K: + case I40E_DMA_CNTX_SIZE_64K: + case I40E_DMA_CNTX_SIZE_128K: + case I40E_DMA_CNTX_SIZE_256K: + pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + pe_cntx_size <<= (u32)settings->pe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ + val = rd32(hw, I40E_GLHMC_FCOEFMAX); + fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) + >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; + if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) + return I40E_ERR_INVALID_SIZE; + + return 0; +} + +/** + * i40e_set_filter_control + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Set the Queue Filters for PE/FCoE and enable filters required + * for a single PF. It is expected that these settings are programmed + * at the driver initialization time. 
+ **/ +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + i40e_status ret = 0; + u32 hash_lut_size = 0; + u32 val; + + if (!settings) + return I40E_ERR_PARAM; + + /* Validate the input settings */ + ret = i40e_validate_filter_settings(hw, settings); + if (ret) + return ret; + + /* Read the PF Queue Filter control register */ + val = rd32(hw, I40E_PFQF_CTL_0); + + /* Program required PE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; + val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEHSIZE_MASK; + /* Program required PE contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; + val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEDSIZE_MASK; + + /* Program required FCoE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + val |= ((u32)settings->fcoe_filt_num << + I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + /* Program required FCoE DDP contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + val |= ((u32)settings->fcoe_cntx_num << + I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + + /* Program Hash LUT size for the PF */ + val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) + hash_lut_size = 1; + val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & + I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + + /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ + if (settings->enable_fdir) + val |= I40E_PFQF_CTL_0_FD_ENA_MASK; + if (settings->enable_ethtype) + val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; + if (settings->enable_macvlan) + val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; + + wr32(hw, I40E_PFQF_CTL_0, val); + + return 0; +} + +/** + * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter + * @hw: pointer to the hw struct + * @mac_addr: MAC address to use in the filter + * @ethtype: Ethertype to use in the filter + * @flags: Flags that needs to be applied to the filter + * @vsi_seid: seid of the control VSI + * @queue: VSI queue number to send the packet to + * @is_add: Add control packet filter if True else remove + * @stats: Structure to hold information on control filter counts + * @cmd_details: pointer to command details structure or NULL + * + * This command will Add or Remove control packet filter for a control VSI. + * In return it will update the total number of perfect filter count in + * the stats member. 
+ **/ +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_control_packet_filter *cmd = + (struct i40e_aqc_add_remove_control_packet_filter *) + &desc.params.raw; + struct i40e_aqc_add_remove_control_packet_filter_completion *resp = + (struct i40e_aqc_add_remove_control_packet_filter_completion *) + &desc.params.raw; + i40e_status status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + if (is_add) { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_control_packet_filter); + cmd->queue = cpu_to_le16(queue); + } else { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_control_packet_filter); + } + + if (mac_addr) + memcpy(cmd->mac, mac_addr, ETH_ALEN); + + cmd->etype = cpu_to_le16(ethtype); + cmd->flags = cpu_to_le16(flags); + cmd->seid = cpu_to_le16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stats) { + stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); + stats->etype_used = le16_to_cpu(resp->etype_used); + stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); + stats->etype_free = le16_to_cpu(resp->etype_free); + } + + return status; +} + +/** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. 
+ * + **/ +static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = cpu_to_le32(reg_addr0); + cmd_resp->address1 = cpu_to_le32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (!status) { + *reg_val0 = le32_to_cpu(cmd_resp->data0); + + if (reg_val1) + *reg_val1 = le32_to_cpu(cmd_resp->data1); + } + + return status; +} + +/** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_pci_config_data - store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status word from PCI config space + * + * Stores the PCI bus info (speed, width, type) within the i40e_hw structure + **/ +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) +{ + hw->bus.type = i40e_bus_type_pci_express; + + switch (link_status & PCI_EXP_LNKSTA_NLW) { + case PCI_EXP_LNKSTA_NLW_X1: + hw->bus.width = i40e_bus_width_pcie_x1; + break; + case PCI_EXP_LNKSTA_NLW_X2: + hw->bus.width = i40e_bus_width_pcie_x2; + break; + case PCI_EXP_LNKSTA_NLW_X4: + hw->bus.width = i40e_bus_width_pcie_x4; + break; + case PCI_EXP_LNKSTA_NLW_X8: + hw->bus.width = i40e_bus_width_pcie_x8; + break; + default: + hw->bus.width = i40e_bus_width_unknown; + break; + } + + switch (link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + hw->bus.speed = i40e_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + hw->bus.speed = i40e_bus_speed_5000; + break; + case PCI_EXP_LNKSTA_CLS_8_0GB: + hw->bus.speed = i40e_bus_speed_8000; + break; + default: + hw->bus.speed = i40e_bus_speed_unknown; + break; + } +} + +/** + * i40e_aq_debug_dump + * @hw: pointer to the hardware structure + * @cluster_id: specific cluster to dump + * @table_id: table id within cluster + * @start_index: index of line in the block to read + * @buff_size: dump buffer size + * @buff: dump buffer + * @ret_buff_size: actual buffer size returned + * @ret_next_table: next block to read + * @ret_next_index: next index to read + * + * Dump internal FW/HW data for debug purposes. 
+ * + **/ +i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_dump_internals *cmd = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + struct i40e_aqc_debug_dump_internals *resp = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_debug_dump_internals); + /* Indirect Command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + cmd->cluster_id = cluster_id; + cmd->table_id = table_id; + cmd->idx = cpu_to_le32(start_index); + + desc.datalen = cpu_to_le16(buff_size); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (ret_buff_size) + *ret_buff_size = le16_to_cpu(desc.datalen); + if (ret_next_table) + *ret_next_table = resp->table_id; + if (ret_next_index) + *ret_next_index = le32_to_cpu(resp->idx); + } + + return status; +} + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + i40e_status status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, + cmd_details); + + return status; +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c new file mode 100644 index 
000000000..2547aa21b --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -0,0 +1,720 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_dcb.h" + +/** + * i40e_get_dcbx_status + * @hw: pointer to the hw struct + * @status: Embedded DCBX Engine Status + * + * Get the DCBX status from the Firmware + **/ +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +{ + u32 reg; + + if (!status) + return I40E_ERR_PARAM; + + reg = rd32(hw, I40E_PRTDCB_GENS); + *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> + I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + + return 0; +} + +/** + * i40e_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> + I40E_IEEE_ETS_WILLING_SHIFT); + etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> + I40E_IEEE_ETS_CBS_SHIFT); + etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> + I40E_IEEE_ETS_MAXTC_SHIFT); + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 
+ etscfg->tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* Move offset to priority table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + dcbcfg->etsrec.prioritytable[i*2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> + I40E_IEEE_PFC_WILLING_SHIFT); + dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> + I40E_IEEE_PFC_MBC_SHIFT); + dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> + I40E_IEEE_PFC_CAP_SHIFT); + dcbcfg->pfc.pfcenable = buf[1]; +} + +/** + * i40e_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + **/ +static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength; + u16 offset = 0; + u16 length; + int i = 0; + u8 *buf; + + typelength = ntohs(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + buf = tlv->tlvinfo; + + /* The App priority table starts 5 octets after TLV header */ + length -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * 
----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < length) { + dcbcfg->app[i].priority = (u8)((buf[offset] & + I40E_IEEE_APP_PRIO_MASK) >> + I40E_IEEE_APP_PRIO_SHIFT); + dcbcfg->app[i].selector = (u8)((buf[offset] & + I40E_IEEE_APP_SEL_MASK) >> + I40E_IEEE_APP_SEL_SHIFT); + dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + switch (subtype) { + case I40E_IEEE_SUBTYPE_ETS_CFG: + i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_ETS_REC: + i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_PFC_CFG: + i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_APP_PRI: + i40e_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + **/ +static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = ntohl(tlv->ouisubtype); + oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> + I40E_LLDP_TLV_OUI_SHIFT); + switch (oui) { + case I40E_IEEE_8021QAZ_OUI: + i40e_parse_ieee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_lldp_to_dcb_config + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + **/ +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg) +{ + i40e_status ret = 0; + struct i40e_lldp_org_tlv *tlv; + u16 type; + u16 length; + u16 typelength; + u16 offset = 0; + + if (!lldpmib || !dcbcfg) + return I40E_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += ETH_HLEN; + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + typelength = ntohs(tlv->typelength); + type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + offset += sizeof(typelength) + length; + + /* END TLV or beyond LLDPDU size */ + if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) + break; + + switch (type) { + case I40E_TLV_TYPE_ORG: + i40e_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + + length); + } + + return ret; +} + +/** + * i40e_aq_get_dcb_config + * @hw: pointer to the hw struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the Firmware + **/ +i40e_status i40e_aq_get_dcb_config(struct 
i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg) +{ + i40e_status ret = 0; + struct i40e_virt_mem mem; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, + (void *)lldpmib, I40E_LLDPDU_SIZE, + NULL, NULL, NULL); + if (ret) + goto free_mem; + + /* Parse LLDP MIB to get dcb configuration */ + ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); + +free_mem: + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 
1 : 0; + /* Add APPs if Error is False */ + if (!err) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_get_dcb_config + * @hw: pointer to the hw struct + * + * Get DCB configuration from the Firmware + **/ +i40e_status i40e_get_dcb_config(struct i40e_hw *hw) +{ + i40e_status ret = 0; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 IEEE only */ + if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4)) + goto ieee; + + /* If Firmware version == v4.33 use old CEE struct */ + if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + goto ieee; + else + goto out; + +ieee: + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + +out: + return ret; +} + +/** + * i40e_init_dcb + * @hw: pointer to the hw struct + * + * Update DCB configuration from the Firmware + **/ +i40e_status i40e_init_dcb(struct i40e_hw *hw) +{ + i40e_status ret = 0; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; + + if (!hw->func_caps.dcb) + return ret; + + /* Read LLDP NVM area */ + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return ret; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return ret; + } + + /* Get DCBX status */ + ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); + if (ret) + return ret; + + /* Check the DCBX Status */ + switch (hw->dcbx_status) { + case I40E_DCBX_STATUS_DONE: + case I40E_DCBX_STATUS_IN_PROGRESS: + /* Get current DCBX configuration */ + ret = i40e_get_dcb_config(hw); + 
if (ret) + return ret; + break; + case I40E_DCBX_STATUS_DISABLED: + return ret; + case I40E_DCBX_STATUS_NOT_STARTED: + case I40E_DCBX_STATUS_MULTIPLE_PEERS: + default: + break; + } + + /* Configure the LLDP MIB change event */ + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); + if (ret) + return ret; + + return ret; +} + +/** + * i40e_read_lldp_cfg - read LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + i40e_status ret = 0; + u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, + sizeof(struct i40e_lldp_variables), + (u8 *)lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h new file mode 100644 index 000000000..e137e3fac --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -0,0 +1,112 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DCB_H_ +#define _I40E_DCB_H_ + +#include "i40e_type.h" + +#define I40E_DCBX_STATUS_NOT_STARTED 0 +#define I40E_DCBX_STATUS_IN_PROGRESS 1 +#define I40E_DCBX_STATUS_DONE 2 +#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 +#define I40E_DCBX_STATUS_DISABLED 7 + +#define I40E_TLV_TYPE_END 0 +#define I40E_TLV_TYPE_ORG 127 + +#define I40E_IEEE_8021QAZ_OUI 0x0080C2 +#define I40E_IEEE_SUBTYPE_ETS_CFG 9 +#define I40E_IEEE_SUBTYPE_ETS_REC 10 +#define I40E_IEEE_SUBTYPE_PFC_CFG 11 +#define I40E_IEEE_SUBTYPE_APP_PRI 12 + +/* Defines for LLDP TLV header */ +#define I40E_LLDP_TLV_LEN_SHIFT 0 +#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) +#define I40E_LLDP_TLV_TYPE_SHIFT 9 +#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT) +#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 +#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) +#define I40E_LLDP_TLV_OUI_SHIFT 8 +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) + +/* Defines for IEEE ETS TLV */ +#define I40E_IEEE_ETS_MAXTC_SHIFT 0 +#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) +#define I40E_IEEE_ETS_CBS_SHIFT 6 +#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_WILLING_SHIFT 7 +#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_PRIO_0_SHIFT 0 +#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) +#define I40E_IEEE_ETS_PRIO_1_SHIFT 4 +#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 + +/* Defines for IEEE TSA types */ +#define I40E_IEEE_TSA_STRICT 0 +#define I40E_IEEE_TSA_ETS 2 + +/* Defines for IEEE PFC TLV */ +#define I40E_IEEE_PFC_CAP_SHIFT 0 +#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) +#define I40E_IEEE_PFC_MBC_SHIFT 6 +#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_WILLING_SHIFT 7 +#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) + +/* Defines for IEEE APP TLV */ +#define I40E_IEEE_APP_SEL_SHIFT 0 +#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT) +#define I40E_IEEE_APP_PRIO_SHIFT 5 +#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) + + +#pragma pack(1) + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct i40e_lldp_org_tlv { + __be16 typelength; + __be32 ouisubtype; + u8 tlvinfo[1]; +}; +#pragma pack() + +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, + u16 *status); +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_get_dcb_config(struct i40e_hw *hw); +i40e_status i40e_init_dcb(struct i40e_hw *hw); +#endif /* _I40E_DCB_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c new file mode 100644 index 000000000..bd5079d5c --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -0,0 +1,321 @@ +/******************************************************************************* + * + 
* Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifdef CONFIG_I40E_DCB +#include "i40e.h" +#include + +/** + * i40e_get_pfc_delay - retrieve PFC Link Delay + * @hw: pointer to hardware struct + * @delay: holds the PFC Link delay value + * + * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA + **/ +static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) +{ + u32 val; + + val = rd32(hw, I40E_PRTDCB_GENC); + *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> + I40E_PRTDCB_GENC_PFCLDA_SHIFT); +} + +/** + * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration + * @netdev: the corresponding netdev + * @ets: structure to hold the ETS information + * + * Returns local IEEE ETS configuration + **/ +static int i40e_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + dcbxcfg = &hw->local_dcbx_config; + ets->willing = dcbxcfg->etscfg.willing; + ets->ets_cap = dcbxcfg->etscfg.maxtcs; + ets->cbs = dcbxcfg->etscfg.cbs; + memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, + sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, + sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, + sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable, + sizeof(ets->prio_tc)); + memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable, + sizeof(ets->tc_reco_bw)); + memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable, + sizeof(ets->tc_reco_tsa)); + memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable, + sizeof(ets->reco_prio_tc)); + + return 0; +} + +/** + * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration + * @netdev: the corresponding netdev + * @ets: structure to hold the PFC information + * + * Returns local IEEE PFC configuration + **/ +static int i40e_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + int i; + + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + dcbxcfg = &hw->local_dcbx_config; + pfc->pfc_cap = dcbxcfg->pfc.pfccap; + pfc->pfc_en = dcbxcfg->pfc.pfcenable; + pfc->mbc = dcbxcfg->pfc.mbc; + i40e_get_pfc_delay(hw, &pfc->delay); + + /* Get Requests/Indicatiosn */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = pf->stats.priority_xoff_tx[i]; + pfc->indications[i] = pf->stats.priority_xoff_rx[i]; + } + 
+ return 0; +} + +/** + * i40e_dcbnl_getdcbx - retrieve current DCBx capability + * @netdev: the corresponding netdev + * + * Returns DCBx capability features + **/ +static u8 i40e_dcbnl_getdcbx(struct net_device *dev) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + return pf->dcbx_cap; +} + +/** + * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx + * @netdev: the corresponding netdev + * + * Returns the SAN MAC address used for LLDP exchange + **/ +static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev, + u8 *perm_addr) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < dev->addr_len; i++) + perm_addr[i] = pf->hw.mac.perm_addr[i]; + + for (j = 0; j < dev->addr_len; j++, i++) + perm_addr[i] = pf->hw.mac.san_addr[j]; +} + +static const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = i40e_dcbnl_ieee_getets, + .ieee_getpfc = i40e_dcbnl_ieee_getpfc, + .getdcbx = i40e_dcbnl_getdcbx, + .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr, +}; + +/** + * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config + * @vsi: the corresponding vsi + * + * Set up all the IEEE APPs in the DCBNL App Table and generate event for + * other settings + **/ +void i40e_dcbnl_set_all(struct i40e_vsi *vsi) +{ + struct net_device *dev = vsi->netdev; + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + struct dcb_app sapp; + u8 prio, tc_map; + int i; + + /* DCB not enabled */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return; + + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + + dcbxcfg = &hw->local_dcbx_config; + + /* Set up all the App TLVs if DCBx is negotiated */ + for (i = 0; i < dcbxcfg->numapps; i++) { + prio = dcbxcfg->app[i].priority; + tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + + /* Add APP only if the TC is enabled for this VSI */ + if (tc_map & vsi->tc_config.enabled_tc) { + sapp.selector = dcbxcfg->app[i].selector; + sapp.protocol = dcbxcfg->app[i].protocolid; + sapp.priority = prio; + dcb_ieee_setapp(dev, &sapp); + } + } + + /* Notify user-space of the changes */ + dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +} + +/** + * i40e_dcbnl_vsi_del_app - Delete APP for given VSI + * @vsi: the corresponding vsi + * @app: APP to delete + * + * Delete given APP from the DCBNL APP table for given + * VSI + **/ +static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, + struct i40e_dcb_app_priority_table *app) +{ + struct net_device *dev = vsi->netdev; + struct dcb_app sapp; + + if (!dev) + return -EINVAL; + + sapp.selector = app->selector; + sapp.protocol = app->protocolid; + sapp.priority = app->priority; + return dcb_ieee_delapp(dev, &sapp); +} + +/** + * i40e_dcbnl_del_app - Delete APP on all VSIs + * @pf: the corresponding PF + * @app: APP to delete + * + * Delete given APP from all the VSIs for given PF + **/ +static void i40e_dcbnl_del_app(struct i40e_pf *pf, + struct i40e_dcb_app_priority_table *app) +{ + int v, err; + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v] && pf->vsi[v]->netdev) { + err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); + if (err) + dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", + __func__, pf->vsi[v]->seid, + err, app->selector, + app->protocolid, app->priority); + } + } +} + +/** + * i40e_dcbnl_find_app - Search APP in given DCB config + * @cfg: 
DCBX configuration data + * @app: APP to search for + * + * Find given APP in the DCB configuration + **/ +static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, + struct i40e_dcb_app_priority_table *app) +{ + int i; + + for (i = 0; i < cfg->numapps; i++) { + if (app->selector == cfg->app[i].selector && + app->protocolid == cfg->app[i].protocolid && + app->priority == cfg->app[i].priority) + return true; + } + + return false; +} + +/** + * i40e_dcbnl_flush_apps - Delete all removed APPs + * @pf: the corresponding PF + * @old_cfg: old DCBX configuration data + * @new_cfg: new DCBX configuration data + * + * Find and delete all APPs that are not present in the passed + * DCB configuration + **/ +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg) +{ + struct i40e_dcb_app_priority_table app; + int i; + + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + + for (i = 0; i < old_cfg->numapps; i++) { + app = old_cfg->app[i]; + /* The APP is not available anymore delete it */ + if (!i40e_dcbnl_find_app(new_cfg, &app)) + i40e_dcbnl_del_app(pf, &app); + } +} + +/** + * i40e_dcbnl_setup - DCBNL setup + * @vsi: the corresponding vsi + * + * Set up DCBNL ops and initial APP TLVs + **/ +void i40e_dcbnl_setup(struct i40e_vsi *vsi) +{ + struct net_device *dev = vsi->netdev; + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + /* Not DCB capable */ + if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + return; + + dev->dcbnl_ops = &dcbnl_ops; + + /* Set initial IEEE DCB settings */ + i40e_dcbnl_set_all(vsi); +} +#endif /* CONFIG_I40E_DCB */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c new file mode 100644 index 000000000..da0faf478 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -0,0 +1,2258 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifdef CONFIG_DEBUG_FS + +#include +#include + +#include "i40e.h" + +static struct dentry *i40e_dbg_root; + +/** + * i40e_dbg_find_vsi - searches for the vsi with the given seid + * @pf - the PF structure to search for the vsi + * @seid - seid of the vsi it is searching for + **/ +static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) +{ + int i; + + if (seid < 0) + dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); + else + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) + return pf->vsi[i]; + + return NULL; +} + +/** + * i40e_dbg_find_veb - searches for the veb with the given seid + * @pf - the PF structure to search for the veb + * @seid - seid of the veb it is searching for + **/ +static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) +{ + int i; + + if ((seid < I40E_BASE_VEB_SEID) || + (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB))) + dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); + else + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == seid) + return pf->veb[i]; + return NULL; +} + +/************************************************************** + * dump + * The dump entry in debugfs is for getting a data snapshow of + * the driver's current configuration and runtime details. + * When the filesystem entry is written, a snapshot is taken. + * When the entry is read, the most recent snapshot data is dumped. + **************************************************************/ +static char *i40e_dbg_dump_buf; +static ssize_t i40e_dbg_dump_data_len; +static ssize_t i40e_dbg_dump_buffer_len; + +/** + * i40e_dbg_dump_read - read the dump data + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int bytes_not_copied; + int len; + + /* is *ppos bigger than the available data? 
*/ + if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf) + return 0; + + /* be sure to not read beyond the end of available data */ + len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); + + bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos += len; + return len; +} + +/** + * i40e_dbg_prep_dump_buf + * @pf: the PF we're working with + * @buflen: the desired buffer length + * + * Return positive if success, 0 if failed + **/ +static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) +{ + /* if not already big enough, prep for re alloc */ + if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) { + kfree(i40e_dbg_dump_buf); + i40e_dbg_dump_buffer_len = 0; + i40e_dbg_dump_buf = NULL; + } + + /* get a new buffer if needed */ + if (!i40e_dbg_dump_buf) { + i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL); + if (i40e_dbg_dump_buf != NULL) + i40e_dbg_dump_buffer_len = buflen; + } + + return i40e_dbg_dump_buffer_len; +} + +/** + * i40e_dbg_dump_write - trigger a datadump snapshot + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + * + * Any write clears the stats + **/ +static ssize_t i40e_dbg_dump_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + bool seid_found = false; + long seid = -1; + int buflen = 0; + int i, ret; + int len; + u8 *p; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* decode the SEID given to be dumped */ + ret = kstrtol_from_user(buffer, count, 0, &seid); + + if (ret) { + dev_info(&pf->pdev->dev, "bad seid value\n"); + } else if (seid == 0) { + seid_found = true; + + kfree(i40e_dbg_dump_buf); + i40e_dbg_dump_buffer_len = 0; + i40e_dbg_dump_data_len = 0; + i40e_dbg_dump_buf = NULL; + dev_info(&pf->pdev->dev, "debug buffer freed\n"); + + } else if (seid == pf->pf_seid || seid == 1) { + seid_found = true; + + buflen = sizeof(struct i40e_pf); + buflen += (sizeof(struct i40e_aq_desc) + * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); + + if (i40e_dbg_prep_dump_buf(pf, buflen)) { + p = i40e_dbg_dump_buf; + + len = sizeof(struct i40e_pf); + memcpy(p, pf, len); + p += len; + + len = (sizeof(struct i40e_aq_desc) + * pf->hw.aq.num_asq_entries); + memcpy(p, pf->hw.aq.asq.desc_buf.va, len); + p += len; + + len = (sizeof(struct i40e_aq_desc) + * pf->hw.aq.num_arq_entries); + memcpy(p, pf->hw.aq.arq.desc_buf.va, len); + p += len; + + i40e_dbg_dump_data_len = buflen; + dev_info(&pf->pdev->dev, + "PF seid %ld dumped %d bytes\n", + seid, (int)i40e_dbg_dump_data_len); + } + } else if (seid >= I40E_BASE_VSI_SEID) { + struct i40e_vsi *vsi = NULL; + struct i40e_mac_filter *f; + int filter_count = 0; + + mutex_lock(&pf->switch_mutex); + vsi = i40e_dbg_find_vsi(pf, seid); + if (!vsi) { + mutex_unlock(&pf->switch_mutex); + goto write_exit; + } + + buflen = sizeof(struct i40e_vsi); + buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors; + buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs; + buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs; + buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs; + list_for_each_entry(f, &vsi->mac_filter_list, list) + filter_count++; + buflen += sizeof(struct i40e_mac_filter) * filter_count; + + if (i40e_dbg_prep_dump_buf(pf, buflen)) { + p = i40e_dbg_dump_buf; + seid_found = true; + + len = 
sizeof(struct i40e_vsi); + memcpy(p, vsi, len); + p += len; + + if (vsi->num_q_vectors) { + len = (sizeof(struct i40e_q_vector) + * vsi->num_q_vectors); + memcpy(p, vsi->q_vectors, len); + p += len; + } + + if (vsi->num_queue_pairs) { + len = (sizeof(struct i40e_ring) * + vsi->num_queue_pairs); + memcpy(p, vsi->tx_rings, len); + p += len; + memcpy(p, vsi->rx_rings, len); + p += len; + } + + if (vsi->tx_rings[0]) { + len = sizeof(struct i40e_tx_buffer); + for (i = 0; i < vsi->num_queue_pairs; i++) { + memcpy(p, vsi->tx_rings[i]->tx_bi, len); + p += len; + } + len = sizeof(struct i40e_rx_buffer); + for (i = 0; i < vsi->num_queue_pairs; i++) { + memcpy(p, vsi->rx_rings[i]->rx_bi, len); + p += len; + } + } + + /* macvlan filter list */ + len = sizeof(struct i40e_mac_filter); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + memcpy(p, f, len); + p += len; + } + + i40e_dbg_dump_data_len = buflen; + dev_info(&pf->pdev->dev, + "VSI seid %ld dumped %d bytes\n", + seid, (int)i40e_dbg_dump_data_len); + } + mutex_unlock(&pf->switch_mutex); + } else if (seid >= I40E_BASE_VEB_SEID) { + struct i40e_veb *veb = NULL; + + mutex_lock(&pf->switch_mutex); + veb = i40e_dbg_find_veb(pf, seid); + if (!veb) { + mutex_unlock(&pf->switch_mutex); + goto write_exit; + } + + buflen = sizeof(struct i40e_veb); + if (i40e_dbg_prep_dump_buf(pf, buflen)) { + seid_found = true; + memcpy(i40e_dbg_dump_buf, veb, buflen); + i40e_dbg_dump_data_len = buflen; + dev_info(&pf->pdev->dev, + "VEB seid %ld dumped %d bytes\n", + seid, (int)i40e_dbg_dump_data_len); + } + mutex_unlock(&pf->switch_mutex); + } + +write_exit: + if (!seid_found) + dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); + + return count; +} + +static const struct file_operations i40e_dbg_dump_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i40e_dbg_dump_read, + .write = i40e_dbg_dump_write, +}; + +/************************************************************** + * command + * The command entry in debugfs is for giving the driver commands + * to be executed - these may be for changing the internal switch + * setup, adding or removing filters, or other things. Many of + * these will be useful for some forms of unit testing. 
+ **************************************************************/ +static char i40e_dbg_command_buf[256] = ""; + +/** + * i40e_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + int bytes_not_copied; + int buf_size = 256; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + if (count < buf_size) + return -ENOSPC; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + len = snprintf(buf, buf_size, "%s: %s\n", + pf->vsi[pf->lan_vsi]->netdev->name, + i40e_dbg_command_buf); + + bytes_not_copied = copy_to_user(buffer, buf, len); + kfree(buf); + + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum + * @pf: the i40e_pf created in command write + * @seid: the seid the user put in + **/ +static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) +{ + struct rtnl_link_stats64 *nstat; + struct i40e_mac_filter *f; + struct i40e_vsi *vsi; + int i; + + vsi = i40e_dbg_find_vsi(pf, seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "dump %d: seid not found\n", seid); + return; + } + dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); + if (vsi->netdev) + dev_info(&pf->pdev->dev, + " netdev: name = %s\n", + vsi->netdev->name); + if (vsi->active_vlans) + dev_info(&pf->pdev->dev, + " vlgrp: & = %p\n", vsi->active_vlans); + dev_info(&pf->pdev->dev, + " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", + vsi->netdev_registered, + vsi->current_netdev_flags, vsi->state, vsi->flags); + if (vsi == pf->vsi[pf->lan_vsi]) + dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + pf->hw.mac.addr, + pf->hw.mac.san_addr, + pf->hw.mac.port_addr); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + dev_info(&pf->pdev->dev, + " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", + f->macaddr, f->vlan, f->is_netdev, f->is_vf, + f->counter); + } + nstat = i40e_get_vsi_stats_struct(vsi); + dev_info(&pf->pdev->dev, + " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", + (long unsigned int)nstat->rx_packets, + (long unsigned int)nstat->rx_bytes, + (long unsigned int)nstat->rx_errors, + (long unsigned int)nstat->rx_dropped); + dev_info(&pf->pdev->dev, + " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", + (long unsigned int)nstat->tx_packets, + (long unsigned int)nstat->tx_bytes, + (long unsigned int)nstat->tx_errors, + (long unsigned int)nstat->tx_dropped); + dev_info(&pf->pdev->dev, + " net_stats: multicast = %lu, collisions = %lu\n", + (long unsigned int)nstat->multicast, + (long unsigned int)nstat->collisions); + dev_info(&pf->pdev->dev, + " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", + (long unsigned int)nstat->rx_length_errors, + (long unsigned int)nstat->rx_over_errors, + (long unsigned int)nstat->rx_crc_errors); + dev_info(&pf->pdev->dev, + " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", + (long unsigned int)nstat->rx_frame_errors, + (long unsigned int)nstat->rx_fifo_errors, + (long unsigned int)nstat->rx_missed_errors); + 
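+ /* the remaining rtnl_link_stats64 counters, followed by vsi->net_stats_offsets */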
dev_info(&pf->pdev->dev, + " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", + (long unsigned int)nstat->tx_aborted_errors, + (long unsigned int)nstat->tx_carrier_errors, + (long unsigned int)nstat->tx_fifo_errors); + dev_info(&pf->pdev->dev, + " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", + (long unsigned int)nstat->tx_heartbeat_errors, + (long unsigned int)nstat->tx_window_errors); + dev_info(&pf->pdev->dev, + " net_stats: rx_compressed = %lu, tx_compressed = %lu\n", + (long unsigned int)nstat->rx_compressed, + (long unsigned int)nstat->tx_compressed); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_packets, + (long unsigned int)vsi->net_stats_offsets.rx_bytes, + (long unsigned int)vsi->net_stats_offsets.rx_errors, + (long unsigned int)vsi->net_stats_offsets.rx_dropped); + dev_info(&pf->pdev->dev, + " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", + (long unsigned int)vsi->net_stats_offsets.tx_packets, + (long unsigned int)vsi->net_stats_offsets.tx_bytes, + (long unsigned int)vsi->net_stats_offsets.tx_errors, + (long unsigned int)vsi->net_stats_offsets.tx_dropped); + dev_info(&pf->pdev->dev, + " net_stats_offsets: multicast = %lu, collisions = %lu\n", + (long unsigned int)vsi->net_stats_offsets.multicast, + (long unsigned int)vsi->net_stats_offsets.collisions); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_length_errors, + (long unsigned int)vsi->net_stats_offsets.rx_over_errors, + (long unsigned int)vsi->net_stats_offsets.rx_crc_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_frame_errors, + (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors, + (long unsigned int)vsi->net_stats_offsets.rx_missed_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors, + (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors, + (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors, + (long unsigned int)vsi->net_stats_offsets.tx_window_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_compressed, + (long unsigned int)vsi->net_stats_offsets.tx_compressed); + dev_info(&pf->pdev->dev, + " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", + vsi->tx_restart, vsi->tx_busy, + vsi->rx_buf_failed, vsi->rx_page_failed); + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + if (!rx_ring) + continue; + + dev_info(&pf->pdev->dev, + " rx_rings[%i]: desc = %p\n", + i, rx_ring->desc); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", + i, rx_ring->dev, + rx_ring->netdev, + rx_ring->rx_bi); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", + i, rx_ring->state, + 
rx_ring->queue_index, + rx_ring->reg_idx); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", + i, rx_ring->rx_hdr_len, + rx_ring->rx_buf_len, + rx_ring->dtype); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, rx_ring->hsplit, + rx_ring->next_to_use, + rx_ring->next_to_clean, + rx_ring->ring_active); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", + i, rx_ring->stats.packets, + rx_ring->stats.bytes, + rx_ring->rx_stats.non_eop_descs); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n", + i, + rx_ring->rx_stats.alloc_page_failed, + rx_ring->rx_stats.alloc_buff_failed); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: size = %i, dma = 0x%08lx\n", + i, rx_ring->size, + (long unsigned int)rx_ring->dma); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: vsi = %p, q_vector = %p\n", + i, rx_ring->vsi, + rx_ring->q_vector); + } + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) + continue; + + dev_info(&pf->pdev->dev, + " tx_rings[%i]: desc = %p\n", + i, tx_ring->desc); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", + i, tx_ring->dev, + tx_ring->netdev, + tx_ring->tx_bi); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", + i, tx_ring->state, + tx_ring->queue_index, + tx_ring->reg_idx); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: dtype = %d\n", + i, tx_ring->dtype); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, tx_ring->hsplit, + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->ring_active); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", + i, tx_ring->stats.packets, + tx_ring->stats.bytes, + tx_ring->tx_stats.restart_queue); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", + i, + tx_ring->tx_stats.tx_busy, + tx_ring->tx_stats.tx_done_old); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: size = %i, dma = 0x%08lx\n", + i, tx_ring->size, + (long unsigned int)tx_ring->dma); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: vsi = %p, q_vector = %p\n", + i, tx_ring->vsi, + tx_ring->q_vector); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: DCB tc = %d\n", + i, tx_ring->dcb_tc); + } + rcu_read_unlock(); + dev_info(&pf->pdev->dev, + " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", + vsi->work_limit, vsi->rx_itr_setting, + ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed", + vsi->tx_itr_setting, + ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? 
"dynamic" : "fixed"); + dev_info(&pf->pdev->dev, + " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", + vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); + dev_info(&pf->pdev->dev, + " num_q_vectors = %i, base_vector = %i\n", + vsi->num_q_vectors, vsi->base_vector); + dev_info(&pf->pdev->dev, + " seid = %d, id = %d, uplink_seid = %d\n", + vsi->seid, vsi->id, vsi->uplink_seid); + dev_info(&pf->pdev->dev, + " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n", + vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc); + dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); + dev_info(&pf->pdev->dev, + " info: valid_sections = 0x%04x, switch_id = 0x%04x\n", + vsi->info.valid_sections, vsi->info.switch_id); + dev_info(&pf->pdev->dev, + " info: sw_reserved[] = 0x%02x 0x%02x\n", + vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]); + dev_info(&pf->pdev->dev, + " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n", + vsi->info.sec_flags, vsi->info.sec_reserved); + dev_info(&pf->pdev->dev, + " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n", + vsi->info.pvid, vsi->info.fcoe_pvid, + vsi->info.port_vlan_flags); + dev_info(&pf->pdev->dev, + " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n", + vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1], + vsi->info.pvlan_reserved[2]); + dev_info(&pf->pdev->dev, + " info: ingress_table = 0x%08x, egress_table = 0x%08x\n", + vsi->info.ingress_table, vsi->info.egress_table); + dev_info(&pf->pdev->dev, + " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n", + vsi->info.cas_pv_tag, vsi->info.cas_pv_flags, + vsi->info.cas_pv_reserved); + dev_info(&pf->pdev->dev, + " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.queue_mapping[0], vsi->info.queue_mapping[1], + vsi->info.queue_mapping[2], vsi->info.queue_mapping[3], + vsi->info.queue_mapping[4], vsi->info.queue_mapping[5], + vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]); + dev_info(&pf->pdev->dev, + " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.queue_mapping[8], vsi->info.queue_mapping[9], + vsi->info.queue_mapping[10], vsi->info.queue_mapping[11], + vsi->info.queue_mapping[12], vsi->info.queue_mapping[13], + vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]); + dev_info(&pf->pdev->dev, + " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.tc_mapping[0], vsi->info.tc_mapping[1], + vsi->info.tc_mapping[2], vsi->info.tc_mapping[3], + vsi->info.tc_mapping[4], vsi->info.tc_mapping[5], + vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]); + dev_info(&pf->pdev->dev, + " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n", + vsi->info.queueing_opt_flags, + vsi->info.queueing_opt_reserved[0], + vsi->info.queueing_opt_reserved[1], + vsi->info.queueing_opt_reserved[2]); + dev_info(&pf->pdev->dev, + " info: up_enable_bits = 0x%02x\n", + vsi->info.up_enable_bits); + dev_info(&pf->pdev->dev, + " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n", + vsi->info.sched_reserved, vsi->info.outer_up_table); + dev_info(&pf->pdev->dev, + " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n", + vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1], + vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3], + vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5], + vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]); + 
dev_info(&pf->pdev->dev, + " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.qs_handle[0], vsi->info.qs_handle[1], + vsi->info.qs_handle[2], vsi->info.qs_handle[3], + vsi->info.qs_handle[4], vsi->info.qs_handle[5], + vsi->info.qs_handle[6], vsi->info.qs_handle[7]); + dev_info(&pf->pdev->dev, + " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n", + vsi->info.stat_counter_idx, vsi->info.sched_id); + dev_info(&pf->pdev->dev, + " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", + vsi->info.resp_reserved[0], vsi->info.resp_reserved[1], + vsi->info.resp_reserved[2], vsi->info.resp_reserved[3], + vsi->info.resp_reserved[4], vsi->info.resp_reserved[5], + vsi->info.resp_reserved[6], vsi->info.resp_reserved[7], + vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], + vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); + if (vsi->back) + dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); + dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); + dev_info(&pf->pdev->dev, + " tc_config: numtc = %d, enabled_tc = 0x%x\n", + vsi->tc_config.numtc, vsi->tc_config.enabled_tc); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, + " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n", + i, vsi->tc_config.tc_info[i].qoffset, + vsi->tc_config.tc_info[i].qcount, + vsi->tc_config.tc_info[i].netdev_tc); + } + dev_info(&pf->pdev->dev, + " bw: bw_limit = %d, bw_max_quanta = %d\n", + vsi->bw_limit, vsi->bw_max_quanta); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, + " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n", + i, vsi->bw_ets_share_credits[i], + vsi->bw_ets_limit_credits[i], + vsi->bw_ets_max_quanta[i]); + } +#ifdef I40E_FCOE + if (vsi->type == I40E_VSI_FCOE) { + dev_info(&pf->pdev->dev, + " fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n", + vsi->fcoe_stats.rx_fcoe_packets, + vsi->fcoe_stats.rx_fcoe_dwords, + vsi->fcoe_stats.rx_fcoe_dropped); + dev_info(&pf->pdev->dev, + " fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n", + vsi->fcoe_stats.tx_fcoe_packets, + vsi->fcoe_stats.tx_fcoe_dwords); + dev_info(&pf->pdev->dev, + " fcoe_stats: bad_crc = %llu, last_error = %llu\n", + vsi->fcoe_stats.fcoe_bad_fccrc, + vsi->fcoe_stats.fcoe_last_error); + dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n", + vsi->fcoe_stats.fcoe_ddp_count); + } +#endif +} + +/** + * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) +{ + struct i40e_adminq_ring *ring; + struct i40e_hw *hw = &pf->hw; + char hdr[32]; + int i; + + snprintf(hdr, sizeof(hdr), "%s %s: ", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + + /* first the send (command) ring, then the receive (event) ring */ + dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); + ring = &(hw->aq.asq); + for (i = 0; i < ring->count; i++) { + struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, + " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", + i, d->flags, d->opcode, d->datalen, d->retval, + d->cookie_high, d->cookie_low); + print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, + 16, 1, d->params.raw, 16, 0); + } + + dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); + ring = &(hw->aq.arq); + for (i = 0; i < ring->count; i++) { + struct i40e_aq_desc 
*d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, + " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", + i, d->flags, d->opcode, d->datalen, d->retval, + d->cookie_high, d->cookie_low); + print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, + 16, 1, d->params.raw, 16, 0); + } +} + +/** + * i40e_dbg_dump_desc - handles dump desc write into command datum + * @cnt: number of arguments that the user supplied + * @vsi_seid: vsi id entered by user + * @ring_id: ring id entered by user + * @desc_n: descriptor number entered by user + * @pf: the i40e_pf created in command write + * @is_rx_ring: true if rx, false if tx + **/ +static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, + struct i40e_pf *pf, bool is_rx_ring) +{ + struct i40e_tx_desc *txd; + union i40e_rx_desc *rxd; + struct i40e_ring *ring; + struct i40e_vsi *vsi; + int i; + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); + return; + } + if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { + dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); + return; + } + if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { + dev_info(&pf->pdev->dev, + "descriptor rings have not been allocated for vsi %d\n", + vsi_seid); + return; + } + + ring = kmemdup(is_rx_ring + ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], + sizeof(*ring), GFP_KERNEL); + if (!ring) + return; + + if (cnt == 2) { + dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", + vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); + for (i = 0; i < ring->count; i++) { + if (!is_rx_ring) { + txd = I40E_TX_DESC(ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx\n", + i, txd->buffer_addr, + txd->cmd_type_offset_bsz); + } else if (sizeof(union i40e_rx_desc) == + sizeof(union i40e_16byte_rx_desc)) { + rxd = I40E_RX_DESC(ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx\n", + i, rxd->read.pkt_addr, + rxd->read.hdr_addr); + } else { + rxd = I40E_RX_DESC(ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + i, rxd->read.pkt_addr, + rxd->read.hdr_addr, + rxd->read.rsvd1, rxd->read.rsvd2); + } + } + } else if (cnt == 3) { + if (desc_n >= ring->count || desc_n < 0) { + dev_info(&pf->pdev->dev, + "descriptor %d not found\n", desc_n); + goto out; + } + if (!is_rx_ring) { + txd = I40E_TX_DESC(ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + txd->buffer_addr, txd->cmd_type_offset_bsz); + } else if (sizeof(union i40e_rx_desc) == + sizeof(union i40e_16byte_rx_desc)) { + rxd = I40E_RX_DESC(ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + rxd->read.pkt_addr, rxd->read.hdr_addr); + } else { + rxd = I40E_RX_DESC(ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + rxd->read.pkt_addr, rxd->read.hdr_addr, + rxd->read.rsvd1, rxd->read.rsvd2); + } + } else { + dev_info(&pf->pdev->dev, "dump desc rx/tx []\n"); + } + +out: + kfree(ring); +} + +/** + * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i]) + 
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", + i, pf->vsi[i]->seid); +} + +/** + * i40e_dbg_dump_stats - handles dump stats write into command datum + * @pf: the i40e_pf created in command write + * @estats: the eth stats structure to be dumped + **/ +static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, + struct i40e_eth_stats *estats) +{ + dev_info(&pf->pdev->dev, " ethstats:\n"); + dev_info(&pf->pdev->dev, + " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", + estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); + dev_info(&pf->pdev->dev, + " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n", + estats->rx_broadcast, estats->rx_discards); + dev_info(&pf->pdev->dev, + " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", + estats->rx_unknown_protocol, estats->tx_bytes); + dev_info(&pf->pdev->dev, + " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", + estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); + dev_info(&pf->pdev->dev, + " tx_discards = \t%lld \ttx_errors = \t\t%lld\n", + estats->tx_discards, estats->tx_errors); +} + +/** + * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb + * @pf: the i40e_pf created in command write + * @seid: the seid the user put in + **/ +static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) +{ + struct i40e_veb *veb; + + if ((seid < I40E_BASE_VEB_SEID) || + (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) { + dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); + return; + } + + veb = i40e_dbg_find_veb(pf, seid); + if (!veb) { + dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); + return; + } + dev_info(&pf->pdev->dev, + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", + veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, + veb->uplink_seid, + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + i40e_dbg_dump_eth_stats(pf, &veb->stats); +} + +/** + * i40e_dbg_dump_veb_all - dumps all known veb's stats + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) +{ + struct i40e_veb *veb; + int i; + + for (i = 0; i < I40E_MAX_VEB; i++) { + veb = pf->veb[i]; + if (veb) + i40e_dbg_dump_veb_seid(pf, veb->seid); + } +} + +/** + * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR + * @pf: the PF that would be altered + * @flag: flag that needs enabling or disabling + * @enable: Enable/disable FD SD/ATR + **/ +static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) +{ + if (enable) { + pf->flags |= flag; + } else { + pf->flags &= ~flag; + pf->auto_disable_flags |= flag; + } + dev_info(&pf->pdev->dev, "requesting a PF reset\n"); + i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); +} + +#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) +/** + * i40e_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_command_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + struct i40e_vsi *vsi; + int vsi_seid; + int veb_seid; + int cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied < 0) { + kfree(cmd_buf); + return bytes_not_copied; + } + if (bytes_not_copied > 0) + count -= bytes_not_copied; + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "add vsi", 7) == 0) { + vsi_seid = -1; + cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); + if (cnt == 0) { + /* default to PF VSI */ + vsi_seid = pf->vsi[pf->lan_vsi]->seid; + } else if (vsi_seid < 0) { + dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", + vsi_seid); + goto command_write_done; + } + + /* By default we are in VEPA mode, if this is the first VF/VMDq + * VSI to be added switch to VEB mode. 
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); + if (vsi) + dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", + vsi->seid, vsi->uplink_seid); + else + dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); + + } else if (strncmp(cmd_buf, "del vsi", 7) == 0) { + sscanf(&cmd_buf[7], "%i", &vsi_seid); + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", + vsi_seid); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid); + i40e_vsi_release(vsi); + + } else if (strncmp(cmd_buf, "add relay", 9) == 0) { + struct i40e_veb *veb; + int uplink_seid, i; + + cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "add relay: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } else if (uplink_seid < 0) { + dev_info(&pf->pdev->dev, + "add relay %d: bad uplink seid\n", + uplink_seid); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "add relay: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) + break; + if (i >= I40E_MAX_VEB && uplink_seid != 0 && + uplink_seid != pf->mac_seid) { + dev_info(&pf->pdev->dev, + "add relay: relay uplink %d not found\n", + uplink_seid); + goto command_write_done; + } + + veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, + vsi->tc_config.enabled_tc); + if (veb) + dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); + else + dev_info(&pf->pdev->dev, "add relay failed\n"); + + } else if (strncmp(cmd_buf, "del relay", 9) == 0) { + int i; + cnt = sscanf(&cmd_buf[9], "%i", &veb_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + "del relay: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } else if (veb_seid < 0) { + dev_info(&pf->pdev->dev, + "del relay %d: bad relay seid\n", veb_seid); + goto command_write_done; + } + + /* find the veb */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == veb_seid) + break; + if (i >= I40E_MAX_VEB) { + dev_info(&pf->pdev->dev, + "del relay: relay %d not found\n", veb_seid); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); + i40e_veb_release(pf->veb[i]); + + } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { + struct i40e_mac_filter *f; + int vlan = 0; + u8 ma[6]; + int ret; + + cnt = sscanf(&cmd_buf[11], + "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i", + &vsi_seid, + &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5], + &vlan); + if (cnt == 7) { + vlan = 0; + } else if (cnt != 8) { + dev_info(&pf->pdev->dev, + "add macaddr: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "add macaddr: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + f = i40e_add_filter(vsi, ma, vlan, false, false); + ret = i40e_sync_vsi_filters(vsi); + if (f && !ret) + dev_info(&pf->pdev->dev, + "add macaddr: %pM vlan=%d added to VSI %d\n", + ma, vlan, vsi_seid); + else + dev_info(&pf->pdev->dev, + "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n", + ma, vlan, vsi_seid, f, ret); + + } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { + int vlan 
= 0; + u8 ma[6]; + int ret; + + cnt = sscanf(&cmd_buf[11], + "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i", + &vsi_seid, + &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5], + &vlan); + if (cnt == 7) { + vlan = 0; + } else if (cnt != 8) { + dev_info(&pf->pdev->dev, + "del macaddr: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "del macaddr: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + i40e_del_filter(vsi, ma, vlan, false, false); + ret = i40e_sync_vsi_filters(vsi); + if (!ret) + dev_info(&pf->pdev->dev, + "del macaddr: %pM vlan=%d removed from VSI %d\n", + ma, vlan, vsi_seid); + else + dev_info(&pf->pdev->dev, + "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n", + ma, vlan, vsi_seid, ret); + + } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { + i40e_status ret; + u16 vid; + unsigned int v; + + cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "add pvid: bad command string, cnt=%d\n", cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n", + vsi_seid); + goto command_write_done; + } + + vid = v; + ret = i40e_vsi_add_pvid(vsi, vid); + if (!ret) + dev_info(&pf->pdev->dev, + "add pvid: %d added to VSI %d\n", + vid, vsi_seid); + else + dev_info(&pf->pdev->dev, + "add pvid: %d to VSI %d failed, ret=%d\n", + vid, vsi_seid, ret); + + } else if (strncmp(cmd_buf, "del pvid", 8) == 0) { + + cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + "del pvid: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "del pvid: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + i40e_vsi_remove_pvid(vsi); + dev_info(&pf->pdev->dev, + "del pvid: removed from VSI %d\n", vsi_seid); + + } else if (strncmp(cmd_buf, "dump", 4) == 0) { + if (strncmp(&cmd_buf[5], "switch", 6) == 0) { + i40e_fetch_switch_configuration(pf, true); + } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) { + cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); + if (cnt > 0) + i40e_dbg_dump_vsi_seid(pf, vsi_seid); + else + i40e_dbg_dump_vsi_no_seid(pf); + } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) { + cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); + if (cnt > 0) + i40e_dbg_dump_veb_seid(pf, vsi_seid); + else + i40e_dbg_dump_veb_all(pf); + } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) { + int ring_id, desc_n; + if (strncmp(&cmd_buf[10], "rx", 2) == 0) { + cnt = sscanf(&cmd_buf[12], "%i %i %i", + &vsi_seid, &ring_id, &desc_n); + i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, + desc_n, pf, true); + } else if (strncmp(&cmd_buf[10], "tx", 2) + == 0) { + cnt = sscanf(&cmd_buf[12], "%i %i %i", + &vsi_seid, &ring_id, &desc_n); + i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, + desc_n, pf, false); + } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) { + i40e_dbg_dump_aq_desc(pf); + } else { + dev_info(&pf->pdev->dev, + "dump desc tx []\n"); + dev_info(&pf->pdev->dev, + "dump desc rx []\n"); + dev_info(&pf->pdev->dev, "dump desc aq\n"); + } + } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { + dev_info(&pf->pdev->dev, + "core reset count: %d\n", pf->corer_count); + dev_info(&pf->pdev->dev, + "global reset count: %d\n", pf->globr_count); + dev_info(&pf->pdev->dev, + "emp reset count: %d\n", pf->empr_count); + dev_info(&pf->pdev->dev, + "pf 
reset count: %d\n", pf->pfr_count); + dev_info(&pf->pdev->dev, + "pf tx sluggish count: %d\n", + pf->tx_sluggish_count); + } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { + struct i40e_aqc_query_port_ets_config_resp *bw_data; + struct i40e_dcbx_config *cfg = + &pf->hw.local_dcbx_config; + struct i40e_dcbx_config *r_cfg = + &pf->hw.remote_dcbx_config; + int i, ret; + + bw_data = kzalloc(sizeof( + struct i40e_aqc_query_port_ets_config_resp), + GFP_KERNEL); + if (!bw_data) { + ret = -ENOMEM; + goto command_write_done; + } + + ret = i40e_aq_query_port_ets_config(&pf->hw, + pf->mac_seid, + bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Query Port ETS Config AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + kfree(bw_data); + bw_data = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, + "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n", + bw_data->tc_valid_bits, + bw_data->tc_strict_priority_bits, + le16_to_cpu(bw_data->tc_bw_max[0]), + le16_to_cpu(bw_data->tc_bw_max[1])); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n", + bw_data->tc_bw_share_credits[i], + le16_to_cpu(bw_data->tc_bw_limits[i])); + } + + kfree(bw_data); + bw_data = NULL; + + dev_info(&pf->pdev->dev, + "port dcbx_mode=%d\n", cfg->dcbx_mode); + dev_info(&pf->pdev->dev, + "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", + cfg->etscfg.willing, cfg->etscfg.cbs, + cfg->etscfg.maxtcs); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, cfg->etscfg.prioritytable[i], + cfg->etscfg.tcbwtable[i], + cfg->etscfg.tsatable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, cfg->etsrec.prioritytable[i], + cfg->etsrec.tcbwtable[i], + cfg->etsrec.tsatable[i]); + } + dev_info(&pf->pdev->dev, + "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", + cfg->pfc.willing, cfg->pfc.mbc, + cfg->pfc.pfccap, cfg->pfc.pfcenable); + dev_info(&pf->pdev->dev, + "port app_table: num_apps=%d\n", cfg->numapps); + for (i = 0; i < cfg->numapps; i++) { + dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n", + i, cfg->app[i].priority, + cfg->app[i].selector, + cfg->app[i].protocolid); + } + /* Peer TLV DCBX data */ + dev_info(&pf->pdev->dev, + "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", + r_cfg->etscfg.willing, + r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, r_cfg->etscfg.prioritytable[i], + r_cfg->etscfg.tcbwtable[i], + r_cfg->etscfg.tsatable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, r_cfg->etsrec.prioritytable[i], + r_cfg->etsrec.tcbwtable[i], + r_cfg->etsrec.tsatable[i]); + } + dev_info(&pf->pdev->dev, + "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", + r_cfg->pfc.willing, + r_cfg->pfc.mbc, + r_cfg->pfc.pfccap, + r_cfg->pfc.pfcenable); + dev_info(&pf->pdev->dev, + "remote port app_table: num_apps=%d\n", + r_cfg->numapps); + for (i = 0; i < r_cfg->numapps; i++) { + dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n", + i, r_cfg->app[i].priority, + r_cfg->app[i].selector, + r_cfg->app[i].protocolid); + } + } else if 
(strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) { + int cluster_id, table_id; + int index, ret; + u16 buff_len = 4096; + u32 next_index; + u8 next_table; + u8 *buff; + u16 rlen; + + cnt = sscanf(&cmd_buf[18], "%i %i %i", + &cluster_id, &table_id, &index); + if (cnt != 3) { + dev_info(&pf->pdev->dev, + "dump debug fwdata \n"); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, + "AQ debug dump fwdata params %x %x %x %x\n", + cluster_id, table_id, index, buff_len); + buff = kzalloc(buff_len, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id, + index, buff_len, buff, &rlen, + &next_table, &next_index, + NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "debug dump fwdata AQ Failed %d 0x%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, + "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n", + rlen, next_table, next_index); + print_hex_dump(KERN_INFO, "AQ buffer WB: ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, rlen, true); + kfree(buff); + buff = NULL; + } else { + dev_info(&pf->pdev->dev, + "dump desc tx [], dump desc rx [],\n"); + dev_info(&pf->pdev->dev, "dump switch\n"); + dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); + dev_info(&pf->pdev->dev, "dump reset stats\n"); + dev_info(&pf->pdev->dev, "dump port\n"); + dev_info(&pf->pdev->dev, + "dump debug fwdata \n"); + } + + } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) { + u32 level; + cnt = sscanf(&cmd_buf[10], "%i", &level); + if (cnt) { + if (I40E_DEBUG_USER & level) { + pf->hw.debug_mask = level; + dev_info(&pf->pdev->dev, + "set hw.debug_mask = 0x%08x\n", + pf->hw.debug_mask); + } + pf->msg_enable = level; + dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n", + pf->msg_enable); + } else { + dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n", + pf->msg_enable); + } + } else if (strncmp(cmd_buf, "pfr", 3) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "corer", 5) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "globr", 5) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "empr", 4) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "read", 4) == 0) { + u32 address; + u32 value; + cnt = sscanf(&cmd_buf[4], "%i", &address); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + /* check the range on address */ + if (address >= I40E_MAX_REGISTER) { + dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n", + address); + goto command_write_done; + } + + value = rd32(&pf->hw, address); + dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", + address, value); + + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 address, value; + cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); + if (cnt != 2) { + dev_info(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + + /* check the range on address */ + if (address >= I40E_MAX_REGISTER) { + dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n", + address); + goto command_write_done; + } + wr32(&pf->hw, address, value); + value = rd32(&pf->hw, 
address); + dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", + address, value); + } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { + if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { + cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); + if (cnt == 0) { + int i; + for (i = 0; i < pf->num_alloc_vsi; i++) + i40e_vsi_reset_stats(pf->vsi[i]); + dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); + } else if (cnt == 1) { + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "clear_stats vsi: bad vsi %d\n", + vsi_seid); + goto command_write_done; + } + i40e_vsi_reset_stats(vsi); + dev_info(&pf->pdev->dev, + "vsi clear stats called for vsi %d\n", + vsi_seid); + } else { + dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); + } + } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { + if (pf->hw.partition_id == 1) { + i40e_pf_reset_stats(pf); + dev_info(&pf->pdev->dev, "port stats cleared\n"); + } else { + dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); + } + } else { + dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); + } + } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { + struct i40e_aq_desc *desc; + i40e_status ret; + + desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + if (!desc) + goto command_write_done; + cnt = sscanf(&cmd_buf[11], + "%hi %hi %hi %hi %i %i %i %i %i %i", + &desc->flags, + &desc->opcode, &desc->datalen, &desc->retval, + &desc->cookie_high, &desc->cookie_low, + &desc->params.internal.param0, + &desc->params.internal.param1, + &desc->params.internal.param2, + &desc->params.internal.param3); + if (cnt != 10) { + dev_info(&pf->pdev->dev, + "send aq_cmd: bad command string, cnt=%d\n", + cnt); + kfree(desc); + desc = NULL; + goto command_write_done; + } + ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); + if (!ret) { + dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); + } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x AQ Error: %d\n", + desc->opcode, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x Status: %d\n", + desc->opcode, ret); + } + dev_info(&pf->pdev->dev, + "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + desc->flags, desc->opcode, desc->datalen, desc->retval, + desc->cookie_high, desc->cookie_low, + desc->params.internal.param0, + desc->params.internal.param1, + desc->params.internal.param2, + desc->params.internal.param3); + kfree(desc); + desc = NULL; + } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { + struct i40e_aq_desc *desc; + i40e_status ret; + u16 buffer_len; + u8 *buff; + + desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + if (!desc) + goto command_write_done; + cnt = sscanf(&cmd_buf[20], + "%hi %hi %hi %hi %i %i %i %i %i %i %hi", + &desc->flags, + &desc->opcode, &desc->datalen, &desc->retval, + &desc->cookie_high, &desc->cookie_low, + &desc->params.internal.param0, + &desc->params.internal.param1, + &desc->params.internal.param2, + &desc->params.internal.param3, + &buffer_len); + if (cnt != 11) { + dev_info(&pf->pdev->dev, + "send indirect aq_cmd: bad command string, cnt=%d\n", + cnt); + kfree(desc); + desc = NULL; + goto command_write_done; + } + /* Just stub a buffer big enough in case user messed up */ + if (buffer_len == 0) + buffer_len = 1280; + + buff = kzalloc(buffer_len, GFP_KERNEL); + if (!buff) { + kfree(desc); + desc = NULL; + goto 
command_write_done; + } + desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + ret = i40e_asq_send_command(&pf->hw, desc, buff, + buffer_len, NULL); + if (!ret) { + dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); + } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x AQ Error: %d\n", + desc->opcode, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x Status: %d\n", + desc->opcode, ret); + } + dev_info(&pf->pdev->dev, + "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + desc->flags, desc->opcode, desc->datalen, desc->retval, + desc->cookie_high, desc->cookie_low, + desc->params.internal.param0, + desc->params.internal.param1, + desc->params.internal.param2, + desc->params.internal.param3); + print_hex_dump(KERN_INFO, "AQ buffer WB: ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, buffer_len, true); + kfree(buff); + buff = NULL; + kfree(desc); + desc = NULL; + } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || + (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { + struct i40e_fdir_filter fd_data; + u16 packet_len, i, j = 0; + char *asc_packet; + u8 *raw_packet; + bool add = false; + int ret; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + if (strncmp(cmd_buf, "add", 3) == 0) + add = true; + + if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, + GFP_KERNEL); + if (!asc_packet) + goto command_write_done; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, + GFP_KERNEL); + + if (!raw_packet) { + kfree(asc_packet); + asc_packet = NULL; + goto command_write_done; + } + + cnt = sscanf(&cmd_buf[13], + "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", + &fd_data.q_index, + &fd_data.flex_off, &fd_data.pctype, + &fd_data.dest_vsi, &fd_data.dest_ctl, + &fd_data.fd_status, &fd_data.cnt_index, + &fd_data.fd_id, &packet_len, asc_packet); + if (cnt != 10) { + dev_info(&pf->pdev->dev, + "program fd_filter: bad command string, cnt=%d\n", + cnt); + kfree(asc_packet); + asc_packet = NULL; + kfree(raw_packet); + goto command_write_done; + } + + /* fix packet length if user entered 0 */ + if (packet_len == 0) + packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE; + + /* make sure to check the max as well */ + packet_len = min_t(u16, + packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); + + for (i = 0; i < packet_len; i++) { + sscanf(&asc_packet[j], "%2hhx ", + &raw_packet[i]); + j += 3; + } + dev_info(&pf->pdev->dev, "FD raw packet dump\n"); + print_hex_dump(KERN_INFO, "FD raw packet: ", + DUMP_PREFIX_OFFSET, 16, 1, + raw_packet, packet_len, true); + ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add); + if (!ret) { + dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); + } else { + dev_info(&pf->pdev->dev, + "Filter command send failed %d\n", ret); + } + kfree(raw_packet); + raw_packet = NULL; + kfree(asc_packet); + asc_packet = NULL; + } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); + } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); + } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { + dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", + i40e_get_current_fd_count(pf)); + } else if (strncmp(cmd_buf, "lldp", 4) == 0) { + if (strncmp(&cmd_buf[5], "stop", 
4) == 0) { + int ret; + ret = i40e_aq_stop_lldp(&pf->hw, false, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Stop LLDP AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + I40E_ETH_P_LLDP, 0, + pf->vsi[pf->lan_vsi]->seid, + 0, true, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: Add Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); + goto command_write_done; + } +#ifdef CONFIG_I40E_DCB + pf->dcbx_cap = DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */ + } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { + int ret; + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + I40E_ETH_P_LLDP, 0, + pf->vsi[pf->lan_vsi]->seid, + 0, false, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: Remove Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); + /* Continue and start FW LLDP anyways */ + } + + ret = i40e_aq_start_lldp(&pf->hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Start LLDP AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } +#ifdef CONFIG_I40E_DCB + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */ + } else if (strncmp(&cmd_buf[5], + "get local", 9) == 0) { + u16 llen, rlen; + int ret; + u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_get_lldp_mib(&pf->hw, 0, + I40E_AQ_LLDP_MIB_LOCAL, + buff, I40E_LLDPDU_SIZE, + &llen, &rlen, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Get LLDP MIB (local) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); + print_hex_dump(KERN_INFO, "LLDP MIB (local): ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, I40E_LLDPDU_SIZE, true); + kfree(buff); + buff = NULL; + } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { + u16 llen, rlen; + int ret; + u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_get_lldp_mib(&pf->hw, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + I40E_AQ_LLDP_MIB_REMOTE, + buff, I40E_LLDPDU_SIZE, + &llen, &rlen, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Get LLDP MIB (remote) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); + print_hex_dump(KERN_INFO, "LLDP MIB (remote): ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, I40E_LLDPDU_SIZE, true); + kfree(buff); + buff = NULL; + } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { + int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, + true, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) { + int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, + false, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + } + } else if (strncmp(cmd_buf, "nvm read", 8) == 0) { + u16 buffer_len, bytes; + u16 module; + u32 offset; + u16 *buff; + int ret; + + cnt = sscanf(&cmd_buf[8], "%hx %x 
%hx", + &module, &offset, &buffer_len); + if (cnt == 0) { + module = 0; + offset = 0; + buffer_len = 0; + } else if (cnt == 1) { + offset = 0; + buffer_len = 0; + } else if (cnt == 2) { + buffer_len = 0; + } else if (cnt > 3) { + dev_info(&pf->pdev->dev, + "nvm read: bad command string, cnt=%d\n", cnt); + goto command_write_done; + } + + /* set the max length */ + buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2); + + bytes = 2 * buffer_len; + + /* read at least 1k bytes, no more than 4kB */ + bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE); + buff = kzalloc(bytes, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed Acquiring NVM resource for read err=%d status=0x%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(buff); + goto command_write_done; + } + + ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset), + bytes, (u8 *)buff, true, NULL); + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, + "Read NVM AQ failed err=%d status=0x%x\n", + ret, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "Read NVM module=0x%x offset=0x%x words=%d\n", + module, offset, buffer_len); + if (bytes) + print_hex_dump(KERN_INFO, "NVM Dump: ", + DUMP_PREFIX_OFFSET, 16, 2, + buff, bytes, true); + } + kfree(buff); + buff = NULL; + } else { + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + dev_info(&pf->pdev->dev, "available commands\n"); + dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n"); + dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n"); + dev_info(&pf->pdev->dev, " add relay \n"); + dev_info(&pf->pdev->dev, " del relay \n"); + dev_info(&pf->pdev->dev, " add macaddr [vlan]\n"); + dev_info(&pf->pdev->dev, " del macaddr [vlan]\n"); + dev_info(&pf->pdev->dev, " add pvid \n"); + dev_info(&pf->pdev->dev, " del pvid \n"); + dev_info(&pf->pdev->dev, " dump switch\n"); + dev_info(&pf->pdev->dev, " dump vsi [seid]\n"); + dev_info(&pf->pdev->dev, " dump desc tx []\n"); + dev_info(&pf->pdev->dev, " dump desc rx []\n"); + dev_info(&pf->pdev->dev, " dump desc aq\n"); + dev_info(&pf->pdev->dev, " dump reset stats\n"); + dev_info(&pf->pdev->dev, " dump debug fwdata \n"); + dev_info(&pf->pdev->dev, " msg_enable [level]\n"); + dev_info(&pf->pdev->dev, " read \n"); + dev_info(&pf->pdev->dev, " write \n"); + dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); + dev_info(&pf->pdev->dev, " clear_stats port\n"); + dev_info(&pf->pdev->dev, " pfr\n"); + dev_info(&pf->pdev->dev, " corer\n"); + dev_info(&pf->pdev->dev, " globr\n"); + dev_info(&pf->pdev->dev, " send aq_cmd \n"); + dev_info(&pf->pdev->dev, " send indirect aq_cmd \n"); + dev_info(&pf->pdev->dev, " add fd_filter \n"); + dev_info(&pf->pdev->dev, " rem fd_filter \n"); + dev_info(&pf->pdev->dev, " fd-atr off\n"); + dev_info(&pf->pdev->dev, " fd-atr on\n"); + dev_info(&pf->pdev->dev, " fd current cnt"); + dev_info(&pf->pdev->dev, " lldp start\n"); + dev_info(&pf->pdev->dev, " lldp stop\n"); + dev_info(&pf->pdev->dev, " lldp get local\n"); + dev_info(&pf->pdev->dev, " lldp get remote\n"); + dev_info(&pf->pdev->dev, " lldp event on\n"); + dev_info(&pf->pdev->dev, " lldp event off\n"); + dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n"); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations i40e_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i40e_dbg_command_read, + .write = 
i40e_dbg_command_write, +}; + +/************************************************************** + * netdev_ops + * The netdev_ops entry in debugfs is for giving the driver commands + * to be executed from the netdev operations. + **************************************************************/ +static char i40e_dbg_netdev_ops_buf[256] = ""; + +/** + * i40e_dbg_netdev_ops - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + int bytes_not_copied; + int buf_size = 256; + char *buf; + int len; + + /* don't allow partal reads */ + if (*ppos != 0) + return 0; + if (count < buf_size) + return -ENOSPC; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + len = snprintf(buf, buf_size, "%s: %s\n", + pf->vsi[pf->lan_vsi]->netdev->name, + i40e_dbg_netdev_ops_buf); + + bytes_not_copied = copy_to_user(buffer, buf, len); + kfree(buf); + + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * i40e_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + int bytes_not_copied; + struct i40e_vsi *vsi; + char *buf_tmp; + int vsi_seid; + int i, cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(i40e_dbg_netdev_ops_buf)) + return -ENOSPC; + + memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); + bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, + buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied > 0) + count -= bytes_not_copied; + i40e_dbg_netdev_ops_buf[count] = '\0'; + + buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); + if (buf_tmp) { + *buf_tmp = '\0'; + count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; + } + + if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "tx_timeout \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "tx_timeout: VSI %d not found\n", vsi_seid); + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", + vsi_seid); + } else if (test_bit(__I40E_DOWN, &vsi->state)) { + dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", + vsi_seid); + } else if (rtnl_trylock()) { + vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "tx_timeout called\n"); + } else { + dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); + } + } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { + int mtu; + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", + &vsi_seid, &mtu); + if (cnt != 2) { + dev_info(&pf->pdev->dev, "change_mtu \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "change_mtu: VSI %d not found\n", vsi_seid); + } else if (!vsi->netdev) { + 
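+ /* no netdev registered for this VSI, so ndo_change_mtu cannot be called */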
dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { + vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, + mtu); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "change_mtu called\n"); + } else { + dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); + } + + } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "set_rx_mode \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "set_rx_mode: VSI %d not found\n", vsi_seid); + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { + vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "set_rx_mode called\n"); + } else { + dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); + } + + } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "napi \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", + vsi_seid); + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", + vsi_seid); + } else { + for (i = 0; i < vsi->num_q_vectors; i++) + napi_schedule(&vsi->q_vectors[i]->napi); + dev_info(&pf->pdev->dev, "napi called\n"); + } + } else { + dev_info(&pf->pdev->dev, "unknown command '%s'\n", + i40e_dbg_netdev_ops_buf); + dev_info(&pf->pdev->dev, "available commands\n"); + dev_info(&pf->pdev->dev, " tx_timeout \n"); + dev_info(&pf->pdev->dev, " change_mtu \n"); + dev_info(&pf->pdev->dev, " set_rx_mode \n"); + dev_info(&pf->pdev->dev, " napi \n"); + } +netdev_ops_write_done: + return count; +} + +static const struct file_operations i40e_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i40e_dbg_netdev_ops_read, + .write = i40e_dbg_netdev_ops_write, +}; + +/** + * i40e_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void i40e_dbg_pf_init(struct i40e_pf *pf) +{ + struct dentry *pfile; + const char *name = pci_name(pf->pdev); + const struct device *dev = &pf->pdev->dev; + + pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); + if (!pf->i40e_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_dump_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->i40e_dbg_pf); + return; +} + +/** + * i40e_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void i40e_dbg_pf_exit(struct i40e_pf *pf) +{ + debugfs_remove_recursive(pf->i40e_dbg_pf); + pf->i40e_dbg_pf = NULL; + + kfree(i40e_dbg_dump_buf); + i40e_dbg_dump_buf = NULL; +} + +/** + * i40e_dbg_init - start up debugfs for the driver + **/ +void i40e_dbg_init(void) +{ + i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); + if 
(!i40e_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * i40e_dbg_exit - clean out the driver's debugfs entries + **/ +void i40e_dbg_exit(void) +{ + debugfs_remove_recursive(i40e_dbg_root); + i40e_dbg_root = NULL; +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c new file mode 100644 index 000000000..56438bd57 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -0,0 +1,154 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_diag.h" +#include "i40e_prototype.h" + +/** + * i40e_diag_reg_pattern_test + * @hw: pointer to the hw struct + * @reg: reg to be tested + * @mask: bits to be touched + **/ +static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, + u32 reg, u32 mask) +{ + const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + u32 pat, val, orig_val; + int i; + + orig_val = rd32(hw, reg); + for (i = 0; i < ARRAY_SIZE(patterns); i++) { + pat = patterns[i]; + wr32(hw, reg, (pat & mask)); + val = rd32(hw, reg); + if ((val & mask) != (pat & mask)) { + i40e_debug(hw, I40E_DEBUG_DIAG, + "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n", + __func__, reg, pat, val); + return I40E_ERR_DIAG_TEST_FAILED; + } + } + + wr32(hw, reg, orig_val); + val = rd32(hw, reg); + if (val != orig_val) { + i40e_debug(hw, I40E_DEBUG_DIAG, + "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return I40E_ERR_DIAG_TEST_FAILED; + } + + return 0; +} + +struct i40e_diag_reg_test_info i40e_reg_list[] = { + /* offset mask elements stride */ + {I40E_QTX_CTL(0), 0x0000FFBF, 1, + I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, + I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, + {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, + I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, + {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, + I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, + {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, + I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, + {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, + {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, + {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, + I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, + {I40E_QINT_TQCTL(0), 0x000000FF, 1, + I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, + {I40E_QINT_RQCTL(0), 0x000000FF, 1, + I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, + {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, + { 
0 } +}; + +/** + * i40e_diag_reg_test + * @hw: pointer to the hw struct + * + * Perform registers diagnostic test + **/ +i40e_status i40e_diag_reg_test(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg, mask; + u32 i, j; + + for (i = 0; i40e_reg_list[i].offset != 0 && + !ret_code; i++) { + + /* set actual reg range for dynamically allocated resources */ + if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && + hw->func_caps.num_tx_qp != 0) + i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; + if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || + i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || + i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && + hw->func_caps.num_msix_vectors != 0) + i40e_reg_list[i].elements = + hw->func_caps.num_msix_vectors - 1; + + /* test register access */ + mask = i40e_reg_list[i].mask; + for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); + } + } + + return ret_code; +} + +/** + * i40e_diag_eeprom_test + * @hw: pointer to the hw struct + * + * Perform EEPROM diagnostic test + **/ +i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) +{ + i40e_status ret_code; + u16 reg_val; + + /* read NVM control word and if NVM valid, validate EEPROM checksum*/ + ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val); + if (!ret_code && + ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == + (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { + ret_code = i40e_validate_nvm_checksum(hw, NULL); + } else { + ret_code = I40E_ERR_DIAG_TEST_FAILED; + } + + return ret_code; +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.h new file mode 100644 index 000000000..0b5911652 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -0,0 +1,51 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DIAG_H_ +#define _I40E_DIAG_H_ + +#include "i40e_type.h" + +enum i40e_lb_mode { + I40E_LB_MODE_NONE = 0x0, + I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL, + I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE, + I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL, +}; + +struct i40e_diag_reg_test_info { + u32 offset; /* the base register */ + u32 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +extern struct i40e_diag_reg_test_info i40e_reg_list[]; + +i40e_status i40e_diag_reg_test(struct i40e_hw *hw); +i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw); + +#endif /* _I40E_DIAG_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c new file mode 100644 index 000000000..4cbaaeb90 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -0,0 +1,2587 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* ethtool support for i40e */ + +#include "i40e.h" +#include "i40e_diag.h" + +struct i40e_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define I40E_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +#define I40E_NETDEV_STAT(_net_stat) \ + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) +#define I40E_PF_STAT(_name, _stat) \ + I40E_STAT(struct i40e_pf, _name, _stat) +#define I40E_VSI_STAT(_name, _stat) \ + I40E_STAT(struct i40e_vsi, _name, _stat) +#define I40E_VEB_STAT(_name, _stat) \ + I40E_STAT(struct i40e_veb, _name, _stat) + +static const struct i40e_stats i40e_gstrings_net_stats[] = { + I40E_NETDEV_STAT(rx_packets), + I40E_NETDEV_STAT(tx_packets), + I40E_NETDEV_STAT(rx_bytes), + I40E_NETDEV_STAT(tx_bytes), + I40E_NETDEV_STAT(rx_errors), + I40E_NETDEV_STAT(tx_errors), + I40E_NETDEV_STAT(rx_dropped), + I40E_NETDEV_STAT(tx_dropped), + I40E_NETDEV_STAT(collisions), + I40E_NETDEV_STAT(rx_length_errors), + I40E_NETDEV_STAT(rx_crc_errors), +}; + +static const struct i40e_stats i40e_gstrings_veb_stats[] = { + I40E_VEB_STAT("rx_bytes", stats.rx_bytes), + I40E_VEB_STAT("tx_bytes", stats.tx_bytes), + I40E_VEB_STAT("rx_unicast", stats.rx_unicast), + I40E_VEB_STAT("tx_unicast", stats.tx_unicast), + I40E_VEB_STAT("rx_multicast", stats.rx_multicast), + I40E_VEB_STAT("tx_multicast", stats.tx_multicast), + I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast), + I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast), + I40E_VEB_STAT("rx_discards", stats.rx_discards), + I40E_VEB_STAT("tx_discards", stats.tx_discards), + I40E_VEB_STAT("tx_errors", stats.tx_errors), + I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol), +}; + +static const struct i40e_stats i40e_gstrings_misc_stats[] = { + I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast), + I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast), + I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast), + I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast), + I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), + I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), + I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), +}; + +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd); + +/* These PF_STATs might look like duplicates of some NETDEV_STATs, + * but they are separate. This device supports Virtualization, and + * as such might have several netdevs supporting VMDq and FCoE going + * through a single port. The NETDEV_STATs are for individual netdevs + * seen at the top of the stack, and the PF_STATs are for the physical + * function at the bottom of the stack hosting those netdevs. + * + * The PF_STATs are appended to the netdev stats only when ethtool -S + * is queried on the base PF netdev, not on the VMDq or FCoE netdev. 
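 *
 * For reference, each I40E_STAT() entry records just a byte offset and a
 * width, and the table is consumed generically at read time. A minimal
 * sketch of that lookup, mirroring i40e_get_ethtool_stats() later in this
 * file, is:
 *
 *	char *p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
 *	data[i++] = (i40e_gstrings_stats[j].sizeof_stat == sizeof(u64)) ?
 *		    *(u64 *)p : *(u32 *)p;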
+ */ +static struct i40e_stats i40e_gstrings_stats[] = { + I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes), + I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), + I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast), + I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast), + I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast), + I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast), + I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), + I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), + I40E_PF_STAT("tx_errors", stats.eth.tx_errors), + I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), + I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), + I40E_PF_STAT("crc_errors", stats.crc_errors), + I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), + I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), + I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), + I40E_PF_STAT("tx_timeout", tx_timeout_count), + I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error), + I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), + I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), + I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), + I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), + I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), + I40E_PF_STAT("rx_size_64", stats.rx_size_64), + I40E_PF_STAT("rx_size_127", stats.rx_size_127), + I40E_PF_STAT("rx_size_255", stats.rx_size_255), + I40E_PF_STAT("rx_size_511", stats.rx_size_511), + I40E_PF_STAT("rx_size_1023", stats.rx_size_1023), + I40E_PF_STAT("rx_size_1522", stats.rx_size_1522), + I40E_PF_STAT("rx_size_big", stats.rx_size_big), + I40E_PF_STAT("tx_size_64", stats.tx_size_64), + I40E_PF_STAT("tx_size_127", stats.tx_size_127), + I40E_PF_STAT("tx_size_255", stats.tx_size_255), + I40E_PF_STAT("tx_size_511", stats.tx_size_511), + I40E_PF_STAT("tx_size_1023", stats.tx_size_1023), + I40E_PF_STAT("tx_size_1522", stats.tx_size_1522), + I40E_PF_STAT("tx_size_big", stats.tx_size_big), + I40E_PF_STAT("rx_undersize", stats.rx_undersize), + I40E_PF_STAT("rx_fragments", stats.rx_fragments), + I40E_PF_STAT("rx_oversize", stats.rx_oversize), + I40E_PF_STAT("rx_jabber", stats.rx_jabber), + I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), + I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + + /* LPI stats */ + I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), + I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), + I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), + I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), +}; + +#ifdef I40E_FCOE +static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { + I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc), + I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped), + I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets), + I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords), + I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count), + I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error), + I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets), + I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords), +}; + +#endif /* I40E_FCOE */ +#define I40E_QUEUE_STATS_LEN(n) \ + (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ + * 2 /* Tx and Rx together */ \ + * (sizeof(struct i40e_queue_stats) / sizeof(u64))) +#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) +#define I40E_NETDEV_STATS_LEN 
ARRAY_SIZE(i40e_gstrings_net_stats) +#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) +#ifdef I40E_FCOE +#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats) +#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ + I40E_FCOE_STATS_LEN + \ + I40E_MISC_STATS_LEN + \ + I40E_QUEUE_STATS_LEN((n))) +#else +#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ + I40E_MISC_STATS_LEN + \ + I40E_QUEUE_STATS_LEN((n))) +#endif /* I40E_FCOE */ +#define I40E_PFC_STATS_LEN ( \ + (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ + / sizeof(u64)) +#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats) +#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ + I40E_PFC_STATS_LEN + \ + I40E_VSI_STATS_LEN((n))) + +enum i40e_ethtool_test_id { + I40E_ETH_TEST_REG = 0, + I40E_ETH_TEST_EEPROM, + I40E_ETH_TEST_INTR, + I40E_ETH_TEST_LOOPBACK, + I40E_ETH_TEST_LINK, +}; + +static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Eeprom test (offline)", + "Interrupt test (offline)", + "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) + +static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { + "NPAR", +}; + +#define I40E_PRIV_FLAGS_STR_LEN \ + (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) + +/** + * i40e_partition_setting_complaint - generic complaint for MFP restriction + * @pf: the PF struct + **/ +static void i40e_partition_setting_complaint(struct i40e_pf *pf) +{ + dev_info(&pf->pdev->dev, + "The link settings are allowed to be changed only from the first partition of a given port. 
Please switch to the first partition in order to change the setting.\n"); +} + +/** + * i40e_get_settings_link_up - Get the Link settings for when link is up + * @hw: hw structure + * @ecmd: ethtool command to fill in + * @netdev: network interface device structure + * + **/ +static void i40e_get_settings_link_up(struct i40e_hw *hw, + struct ethtool_cmd *ecmd, + struct net_device *netdev) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + u32 link_speed = hw_link_info->link_speed; + + /* Initialize supported and advertised settings based on phy settings */ + switch (hw_link_info->phy_type) { + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + break; + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ecmd->supported = SUPPORTED_40000baseCR4_Full; + break; + case I40E_PHY_TYPE_40GBASE_KR4: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_40000baseKR4_Full; + break; + case I40E_PHY_TYPE_40GBASE_SR4: + ecmd->supported = SUPPORTED_40000baseSR4_Full; + break; + case I40E_PHY_TYPE_40GBASE_LR4: + ecmd->supported = SUPPORTED_40000baseLR4_Full; + break; + case I40E_PHY_TYPE_20GBASE_KR2: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_20000baseKR2_Full; + break; + case I40E_PHY_TYPE_10GBASE_KX4: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseKX4_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseKX4_Full; + break; + case I40E_PHY_TYPE_10GBASE_KR: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseKR_Full; + break; + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseKX_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseKX_Full; + break; + case I40E_PHY_TYPE_10GBASE_T: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + break; + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_SFI: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_10GBASE_AOC: + ecmd->supported = 
SUPPORTED_10000baseT_Full; + break; + case I40E_PHY_TYPE_SGMII: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + default: + /* if we got here and link is up something bad is afoot */ + netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n", + hw_link_info->phy_type); + } + + /* Set speed and duplex */ + switch (link_speed) { + case I40E_LINK_SPEED_40GB: + ethtool_cmd_speed_set(ecmd, SPEED_40000); + break; + case I40E_LINK_SPEED_20GB: + ethtool_cmd_speed_set(ecmd, SPEED_20000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; +} + +/** + * i40e_get_settings_link_down - Get the Link settings for when link is down + * @hw: hw structure + * @ecmd: ethtool command to fill in + * + * Reports link settings that can be determined when link is down + **/ +static void i40e_get_settings_link_down(struct i40e_hw *hw, + struct ethtool_cmd *ecmd) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + + /* link is down and the driver needs to fall back on + * device ID to determine what kinds of info to display, + * it's mostly a guess that may change when link is up + */ + switch (hw->device_id) { + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + /* pluggable QSFP */ + ecmd->supported = SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseLR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseLR4_Full; + break; + case I40E_DEV_ID_KX_B: + /* backplane 40G */ + ecmd->supported = SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_40000baseKR4_Full; + break; + case I40E_DEV_ID_KX_C: + /* backplane 10G */ + ecmd->supported = SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_10000baseKR_Full; + break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_DEV_ID_20G_KR2: + /* backplane 20G */ + ecmd->supported = SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_20000baseKR2_Full; + break; + default: + /* all the rest are 10G/1G */ + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + } + + /* With no link speed and duplex are unknown */ + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; +} + +/** + * 
i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + + if (link_up) + i40e_get_settings_link_up(hw, ecmd, netdev); + else + i40e_get_settings_link_down(hw, ecmd); + + /* Now set the settings that don't rely on link being up/down */ + + /* Set autoneg settings */ + ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); + + switch (hw->phy.media_type) { + case I40E_MEDIA_TYPE_BACKPLANE: + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_Backplane; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_Backplane; + ecmd->port = PORT_NONE; + break; + case I40E_MEDIA_TYPE_BASET: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case I40E_MEDIA_TYPE_DA: + case I40E_MEDIA_TYPE_CX4: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case I40E_MEDIA_TYPE_FIBER: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case I40E_MEDIA_TYPE_UNKNOWN: + default: + ecmd->port = PORT_OTHER; + break; + } + + /* Set transceiver */ + ecmd->transceiver = XCVR_EXTERNAL; + + /* Set flow control settings */ + ecmd->supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case I40E_FC_FULL: + ecmd->advertising |= ADVERTISED_Pause; + break; + case I40E_FC_TX_PAUSE: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + case I40E_FC_RX_PAUSE: + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + } + + return 0; +} + +/** + * i40e_set_settings - Set Speed and Duplex + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Set speed/duplex per media_types advertised/forced + **/ +static int i40e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + struct ethtool_cmd safe_ecmd; + i40e_status status = 0; + bool change = false; + int err = 0; + u8 autoneg; + u32 advertise; + + /* Changing port settings is not supported if this isn't the + * port's controlling PF + */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && + hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && + hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.link_info.link_info & I40E_AQ_LINK_UP) + return -EOPNOTSUPP; + + /* get our own copy of the bits to check against */ + memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); + i40e_get_settings(netdev, &safe_ecmd); + + /* save autoneg and speed out of ecmd */ + autoneg = ecmd->autoneg; + advertise = ecmd->advertising; + + /* set autoneg and speed back to what they currently 
are */ + ecmd->autoneg = safe_ecmd.autoneg; + ecmd->advertising = safe_ecmd.advertising; + + ecmd->cmd = safe_ecmd.cmd; + /* If ecmd and safe_ecmd are not the same now, then they are + * trying to set something that we do not support + */ + if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd))) + return -EOPNOTSUPP; + + while (test_bit(__I40E_CONFIG_BUSY, &vsi->state)) + usleep_range(1000, 2000); + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) + return -EAGAIN; + + /* Copy abilities to config in case autoneg is not + * set below + */ + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + config.abilities = abilities.abilities; + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + return -EINVAL; + } + /* If autoneg was not already enabled */ + if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_AN; + change = true; + } + } else { + /* If autoneg is supported 10GBASE_T is the only phy that + * can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + return -EINVAL; + } + /* If autoneg is currently enabled */ + if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + config.abilities = abilities.abilities & + ~I40E_AQ_PHY_ENABLE_AN; + change = true; + } + } + + if (advertise & ~safe_ecmd.supported) + return -EINVAL; + + if (advertise & ADVERTISED_100baseT_Full) + config.link_speed |= I40E_LINK_SPEED_100MB; + if (advertise & ADVERTISED_1000baseT_Full || + advertise & ADVERTISED_1000baseKX_Full) + config.link_speed |= I40E_LINK_SPEED_1GB; + if (advertise & ADVERTISED_10000baseT_Full || + advertise & ADVERTISED_10000baseKX4_Full || + advertise & ADVERTISED_10000baseKR_Full) + config.link_speed |= I40E_LINK_SPEED_10GB; + if (advertise & ADVERTISED_20000baseKR2_Full) + config.link_speed |= I40E_LINK_SPEED_20GB; + if (advertise & ADVERTISED_40000baseKR4_Full || + advertise & ADVERTISED_40000baseCR4_Full || + advertise & ADVERTISED_40000baseSR4_Full || + advertise & ADVERTISED_40000baseLR4_Full) + config.link_speed |= I40E_LINK_SPEED_40GB; + + if (change || (abilities.link_speed != config.link_speed)) { + /* copy over the rest of the abilities */ + config.phy_type = abilities.phy_type; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; + /* set link and auto negotiation so changes take effect */ + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* If link is up put link down */ + if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* make the aq call */ + status = i40e_aq_set_phy_config(hw, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed with error %d.\n", + status); + return -EAGAIN; + } + + status = i40e_aq_get_link_info(hw, true, 
NULL, NULL); + if (status) + netdev_info(netdev, "Updating link info failed with error %d\n", + status); + + } else { + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); + } + + return err; +} + +static int i40e_nway_reset(struct net_device *netdev) +{ + /* restart autonegotiation */ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + i40e_status ret = 0; + + ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); + if (ret) { + netdev_info(netdev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + return -EIO; + } + + return 0; +} + +/** + * i40e_get_pauseparam - Get Flow Control status + * Return tx/rx-pause status + **/ +static void i40e_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; + + pause->autoneg = + ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); + + /* PFC enabled so report LFC as off */ + if (dcbx_cfg->pfc.pfcenable) { + pause->rx_pause = 0; + pause->tx_pause = 0; + return; + } + + if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == I40E_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +/** + * i40e_set_pauseparam - Set Flow Control parameter + * @netdev: network interface device structure + * @pause: return tx/rx flow control status + **/ +static int i40e_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + i40e_status status; + u8 aq_failures; + int err = 0; + + /* Changing the port's flow control is not supported if this isn't the + * port's controlling PF + */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE)) { + netdev_info(netdev, "To change autoneg please use: ethtool -s autoneg \n"); + return -EOPNOTSUPP; + } + + /* If we have link and don't have autoneg */ + if (!test_bit(__I40E_DOWN, &pf->state) && + !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) { + /* Send message that it might not necessarily work*/ + netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); + } + + if (dcbx_cfg->pfc.pfcenable) { + netdev_info(netdev, + "Priority flow control enabled. 
Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } + + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = I40E_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = I40E_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = I40E_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = I40E_FC_NONE; + else + return -EINVAL; + + /* Tell the OS link is going down, the link will go back up when fw + * says it is ready asynchronously + */ + netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* Set the fc mode and only restart an if link is up*/ + status = i40e_set_fc(hw, &aq_failures, link_up); + + if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n", + status, hw->aq.asq_last_status); + err = -EAGAIN; + } + if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { + netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n", + status, hw->aq.asq_last_status); + err = -EAGAIN; + } + if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { + netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", + status, hw->aq.asq_last_status); + err = -EAGAIN; + } + + if (!test_bit(__I40E_DOWN, &pf->state)) { + /* Give it a little more time to try to come back */ + msleep(75); + if (!test_bit(__I40E_DOWN, &pf->state)) + return i40e_nway_reset(netdev); + } + + return err; +} + +static u32 i40e_get_msglevel(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + return pf->msg_enable; +} + +static void i40e_set_msglevel(struct net_device *netdev, u32 data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + if (I40E_DEBUG_USER & data) + pf->hw.debug_mask = data; + pf->msg_enable = data; +} + +static int i40e_get_regs_len(struct net_device *netdev) +{ + int reg_count = 0; + int i; + + for (i = 0; i40e_reg_list[i].offset != 0; i++) + reg_count += i40e_reg_list[i].elements; + + return reg_count * sizeof(u32); +} + +static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 *reg_buf = p; + int i, j, ri; + u32 reg; + + /* Tell ethtool which driver-version-specific regs output we have. + * + * At some point, if we have ethtool doing special formatting of + * this data, it will rely on this version number to know how to + * interpret things. Hence, this needs to be updated if/when the + * diags register table is changed. 
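 *
 * The buffer filled in below is sized by i40e_get_regs_len() above, which
 * walks the same i40e_reg_list[] table, so the dump length and the dump
 * contents stay in sync by construction. A minimal sketch of that sizing,
 * assuming the table remains terminated by a zero offset, is:
 *
 *	for (i = 0; i40e_reg_list[i].offset != 0; i++)
 *		reg_count += i40e_reg_list[i].elements;
 *	return reg_count * sizeof(u32);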
+ */ + regs->version = 1; + + /* loop through the diags reg table for what to print */ + ri = 0; + for (i = 0; i40e_reg_list[i].offset != 0; i++) { + for (j = 0; j < i40e_reg_list[i].elements; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + reg_buf[ri++] = rd32(hw, reg); + } + } + +} + +static int i40e_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_hw *hw = &np->vsi->back->hw; + struct i40e_pf *pf = np->vsi->back; + int ret_val = 0, len, offset; + u8 *eeprom_buff; + u16 i, sectors; + bool last; + u32 magic; + +#define I40E_NVM_SECTOR_SIZE 4096 + if (eeprom->len == 0) + return -EINVAL; + + /* check for NVMUpdate access method */ + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + struct i40e_nvm_access *cmd; + int errno; + + /* make sure it is the right magic for NVMUpdate */ + if ((eeprom->magic >> 16) != hw->device_id) + return -EINVAL; + + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || + (hw->debug_mask & I40E_DEBUG_NVM))) + dev_info(&pf->pdev->dev, + "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); + + return errno; + } + + /* normal ethtool get_eeprom support */ + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_val) { + dev_info(&pf->pdev->dev, + "Failed Acquiring NVM resource for read err=%d status=0x%x\n", + ret_val, hw->aq.asq_last_status); + goto free_buff; + } + + sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; + sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; + len = I40E_NVM_SECTOR_SIZE; + last = false; + for (i = 0; i < sectors; i++) { + if (i == (sectors - 1)) { + len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); + last = true; + } + offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), + ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, + (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), + last, NULL); + if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, + "read NVM failed, invalid offset 0x%x\n", + offset); + break; + } else if (ret_val && + hw->aq.asq_last_status == I40E_AQ_RC_EACCES) { + dev_info(&pf->pdev->dev, + "read NVM failed, access, offset 0x%x\n", + offset); + break; + } else if (ret_val) { + dev_info(&pf->pdev->dev, + "read NVM failed offset %d err=%d status=0x%x\n", + offset, ret_val, hw->aq.asq_last_status); + break; + } + } + + i40e_release_nvm(hw); + memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); +free_buff: + kfree(eeprom_buff); + return ret_val; +} + +static int i40e_get_eeprom_len(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_hw *hw = &np->vsi->back->hw; + u32 val; + + val = (rd32(hw, I40E_GLPCI_LBARCTRL) + & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) + >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; + /* register returns value in power of 2, 64Kbyte chunks. 
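 * For example, assuming the FL_SIZE field reads back as 7, the value
 * computed below is (64 * 1024) * (1 << 7) = 8 MiB; a field value of 0
 * maps to the 64 KiB minimum.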
*/ + val = (64 * 1024) * (1 << val); + return val; +} + +static int i40e_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_hw *hw = &np->vsi->back->hw; + struct i40e_pf *pf = np->vsi->back; + struct i40e_nvm_access *cmd; + int ret_val = 0; + int errno; + u32 magic; + + /* normal ethtool set_eeprom is not supported */ + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic == magic) + return -EOPNOTSUPP; + + /* check for NVMUpdate access method */ + if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) + return -EINVAL; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && + hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || + (hw->debug_mask & I40E_DEBUG_NVM))) + dev_info(&pf->pdev->dev, + "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); + + return errno; +} + +static void i40e_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, i40e_driver_version_str, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw), + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(pf->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; +} + +static void i40e_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = vsi->rx_rings[0]->count; + ring->tx_pending = vsi->tx_rings[0]->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int i40e_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 new_rx_count, new_tx_count; + int i, err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || + ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, + I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == vsi->tx_rings[0]->count) && + (new_rx_count == vsi->rx_rings[0]->count)) + return 
0; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + usleep_range(1000, 2000); + + if (!netif_running(vsi->netdev)) { + /* simple case - set for the next time the netdev is started */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + vsi->tx_rings[i]->count = new_tx_count; + vsi->rx_rings[i]->count = new_rx_count; + } + goto done; + } + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + + /* alloc updated Tx resources */ + if (new_tx_count != vsi->tx_rings[0]->count) { + netdev_info(netdev, + "Changing Tx descriptor count from %d to %d.\n", + vsi->tx_rings[0]->count, new_tx_count); + tx_rings = kcalloc(vsi->alloc_queue_pairs, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!tx_rings) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < vsi->num_queue_pairs; i++) { + /* clone ring and setup updated count */ + tx_rings[i] = *vsi->tx_rings[i]; + tx_rings[i].count = new_tx_count; + err = i40e_setup_tx_descriptors(&tx_rings[i]); + if (err) { + while (i) { + i--; + i40e_free_tx_resources(&tx_rings[i]); + } + kfree(tx_rings); + tx_rings = NULL; + + goto done; + } + } + } + + /* alloc updated Rx resources */ + if (new_rx_count != vsi->rx_rings[0]->count) { + netdev_info(netdev, + "Changing Rx descriptor count from %d to %d\n", + vsi->rx_rings[0]->count, new_rx_count); + rx_rings = kcalloc(vsi->alloc_queue_pairs, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!rx_rings) { + err = -ENOMEM; + goto free_tx; + } + + for (i = 0; i < vsi->num_queue_pairs; i++) { + /* clone ring and setup updated count */ + rx_rings[i] = *vsi->rx_rings[i]; + rx_rings[i].count = new_rx_count; + err = i40e_setup_rx_descriptors(&rx_rings[i]); + if (err) { + while (i) { + i--; + i40e_free_rx_resources(&rx_rings[i]); + } + kfree(rx_rings); + rx_rings = NULL; + + goto free_tx; + } + } + } + + /* Bring interface down, copy in the new ring info, + * then restore the interface + */ + i40e_down(vsi); + + if (tx_rings) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_free_tx_resources(vsi->tx_rings[i]); + *vsi->tx_rings[i] = tx_rings[i]; + } + kfree(tx_rings); + tx_rings = NULL; + } + + if (rx_rings) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_free_rx_resources(vsi->rx_rings[i]); + *vsi->rx_rings[i] = rx_rings[i]; + } + kfree(rx_rings); + rx_rings = NULL; + } + + i40e_up(vsi); + +free_tx: + /* error cleanup if the Rx allocations failed after getting Tx */ + if (tx_rings) { + for (i = 0; i < vsi->num_queue_pairs; i++) + i40e_free_tx_resources(&tx_rings[i]); + kfree(tx_rings); + tx_rings = NULL; + } + +done: + clear_bit(__I40E_CONFIG_BUSY, &pf->state); + + return err; +} + +static int i40e_get_sset_count(struct net_device *netdev, int sset) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + switch (sset) { + case ETH_SS_TEST: + return I40E_TEST_LEN; + case ETH_SS_STATS: + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { + int len = I40E_PF_STATS_LEN(netdev); + + if (pf->lan_veb != I40E_NO_VEB) + len += I40E_VEB_STATS_LEN; + return len; + } else { + return I40E_VSI_STATS_LEN(netdev); + } + case ETH_SS_PRIV_FLAGS: + return I40E_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void i40e_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; + struct i40e_vsi *vsi = 
np->vsi; + struct i40e_pf *pf = vsi->back; + int i = 0; + char *p; + int j; + struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); + unsigned int start; + + i40e_update_stats(vsi); + + for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) { + p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset; + data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < I40E_MISC_STATS_LEN; j++) { + p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; + data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +#ifdef I40E_FCOE + for (j = 0; j < I40E_FCOE_STATS_LEN; j++) { + p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset; + data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +#endif + rcu_read_lock(); + for (j = 0; j < vsi->num_queue_pairs; j++) { + tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + + if (!tx_ring) + continue; + + /* process Tx ring statistics */ + do { + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + data[i] = tx_ring->stats.packets; + data[i + 1] = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); + i += 2; + + /* Rx ring is the 2nd half of the queue pair */ + rx_ring = &tx_ring[1]; + do { + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + data[i] = rx_ring->stats.packets; + data[i + 1] = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + i += 2; + } + rcu_read_unlock(); + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + return; + + if (pf->lan_veb != I40E_NO_VEB) { + struct i40e_veb *veb = pf->veb[pf->lan_veb]; + for (j = 0; j < I40E_VEB_STATS_LEN; j++) { + p = (char *)veb; + p += i40e_gstrings_veb_stats[j].stat_offset; + data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { + p = (char *)pf + i40e_gstrings_stats[j].stat_offset; + data[i++] = (i40e_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { + data[i++] = pf->stats.priority_xon_tx[j]; + data[i++] = pf->stats.priority_xoff_tx[j]; + } + for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { + data[i++] = pf->stats.priority_xon_rx[j]; + data[i++] = pf->stats.priority_xoff_rx[j]; + } + for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) + data[i++] = pf->stats.priority_xon_2_xoff[j]; +} + +static void i40e_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < I40E_TEST_LEN; i++) { + memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_net_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MISC_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_misc_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } +#ifdef I40E_FCOE + for (i = 0; i < I40E_FCOE_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_fcoe_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } +#endif + for (i = 0; i < vsi->num_queue_pairs; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + p += ETH_GSTRING_LEN; + } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + return; + + if (pf->lan_veb != I40E_NO_VEB) { + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "veb.%s", + i40e_gstrings_veb_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "port.%s", + i40e_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.tx_priority_%u_xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.tx_priority_%u_xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon_2_xoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + break; + } +} + +static int i40e_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + /* only report HW timestamping if PTP is enabled */ + if (!(pf->flags & I40E_FLAG_PTP)) + return ethtool_op_get_ts_info(dev, info); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (pf->ptp_clock) + 
info->phc_index = ptp_clock_index(pf->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + return 0; +} + +static int i40e_link_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "link test\n"); + if (i40e_get_link_status(&pf->hw)) + *data = 0; + else + *data = 1; + + return *data; +} + +static int i40e_reg_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "register test\n"); + *data = i40e_diag_reg_test(&pf->hw); + + return *data; +} + +static int i40e_eeprom_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "eeprom test\n"); + *data = i40e_diag_eeprom_test(&pf->hw); + + /* forcebly clear the NVM Update state machine */ + pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; + + return *data; +} + +static int i40e_intr_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + u16 swc_old = pf->sw_int_count; + + netif_info(pf, hw, netdev, "interrupt test\n"); + wr32(&pf->hw, I40E_PFINT_DYN_CTL0, + (I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); + usleep_range(1000, 2000); + *data = (swc_old == pf->sw_int_count); + + return *data; +} + +static int i40e_loopback_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "loopback test not implemented\n"); + *data = 0; + + return *data; +} + +static void i40e_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct i40e_pf *pf = np->vsi->back; + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + netif_info(pf, drv, netdev, "offline testing starting\n"); + + set_bit(__I40E_TESTING, &pf->state); + /* If the device is online then take it offline */ + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + /* Link test performed before hardware reset + * so autoneg doesn't interfere with test result + */ + if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (i40e_loopback_test(netdev, 
&data[I40E_ETH_TEST_LOOPBACK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* run reg test last, a reset is required after it */ + if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__I40E_TESTING, &pf->state); + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + netif_info(pf, drv, netdev, "online testing starting\n"); + + if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline only tests, not run in online; pass by default */ + data[I40E_ETH_TEST_REG] = 0; + data[I40E_ETH_TEST_EEPROM] = 0; + data[I40E_ETH_TEST_INTR] = 0; + data[I40E_ETH_TEST_LOOPBACK] = 0; + } + + netif_info(pf, drv, netdev, "testing finished\n"); +} + +static void i40e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 wol_nvm_bits; + + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) { + wol->supported = 0; + wol->wolopts = 0; + } else { + wol->supported = WAKE_MAGIC; + wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); + } +} + +/** + * i40e_set_wol - set the WakeOnLAN configuration + * @netdev: the netdev in question + * @wol: the ethtool WoL setting data + **/ +static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + u16 wol_nvm_bits; + + /* WoL not supported if this isn't the controlling PF on the port */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if (((1 << hw->port) & wol_nvm_bits)) + return -EOPNOTSUPP; + + /* only magic packet is supported */ + if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) + return -EOPNOTSUPP; + + /* is this a new value? */ + if (pf->wol_en != !!wol->wolopts) { + pf->wol_en = !!wol->wolopts; + device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); + } + + return 0; +} + +static int i40e_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + int blink_freq = 2; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + pf->led_status = i40e_led_get(hw); + return blink_freq; + case ETHTOOL_ID_ON: + i40e_led_set(hw, 0xF, false); + break; + case ETHTOOL_ID_OFF: + i40e_led_set(hw, 0x0, false); + break; + case ETHTOOL_ID_INACTIVE: + i40e_led_set(hw, pf->led_status, false); + break; + default: + break; + } + + return 0; +} + +/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt + * Throttle Rate (ITR) ie. 
ITR(1) = 2us ITR(10) = 20 us, and also + * 125us (8000 interrupts per second) == ITR(62) + */ + +static int i40e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + ec->tx_max_coalesced_frames_irq = vsi->work_limit; + ec->rx_max_coalesced_frames_irq = vsi->work_limit; + + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) + ec->use_adaptive_rx_coalesce = 1; + + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + ec->use_adaptive_tx_coalesce = 1; + + ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + + return 0; +} + +static int i40e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_q_vector *q_vector; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 vector; + int i; + + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + vsi->work_limit = ec->tx_max_coalesced_frames_irq; + + vector = vsi->base_vector; + if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && + (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { + vsi->rx_itr_setting = ec->rx_coalesce_usecs; + } else if (ec->rx_coalesce_usecs == 0) { + vsi->rx_itr_setting = ec->rx_coalesce_usecs; + if (ec->use_adaptive_rx_coalesce) + netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); + } else { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && + (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { + vsi->tx_itr_setting = ec->tx_coalesce_usecs; + } else if (ec->tx_coalesce_usecs == 0) { + vsi->tx_itr_setting = ec->tx_coalesce_usecs; + if (ec->use_adaptive_tx_coalesce) + netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); + } else { + netif_info(pf, drv, netdev, + "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if (ec->use_adaptive_rx_coalesce) + vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + + if (ec->use_adaptive_tx_coalesce) + vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + q_vector = vsi->q_vectors[i]; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); + i40e_flush(hw); + } + + return 0; +} + +/** + * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type + * @pf: pointer to the physical function struct + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow is supported, else Invalid Input. 
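+ *
+ * Usage sketch (the interface name is an example only): this handler backs
+ * queries such as
+ *   ethtool -n eth0 rx-flow-hash tcp4
+ * which report whether the IP src/dst and the L4 ports are hashed for that
+ * flow type.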
+ **/ +static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { + cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; + cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; + return 0; + } + /* Report default options for RSS on i40e */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through to add IP fields */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through to add IP fields */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * i40e_get_ethtool_fdir_all - Populates the rule count of a command + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * @rule_locs: Array of used rule locations + * + * This function populates both the total and actual rule count of + * the ethtool flow classification command + * + * Returns 0 on success or -EMSGSIZE if entry not found + **/ +static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40e_fdir_filter *rule; + struct hlist_node *node2; + int cnt = 0; + + /* report total rule count */ + cmd->data = i40e_get_fd_cnt_all(pf); + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->fd_id; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +/** + * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * + * This function looks up a filter based on the Rx flow classification + * command and fills the flow spec info for it if found + * + * Returns 0 on success or -EINVAL if filter not found + **/ +static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_fdir_filter *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->fd_id) + break; + } + + if (!rule || fsp->location != rule->fd_id) + return -EINVAL; + + fsp->flow_type = rule->flow_type; + if (fsp->flow_type == IP_USER_FLOW) { + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + } + + /* Reverse the src and dest notion, since the HW views them from + * Tx perspective where as the user expects it from Rx filter view. 
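+	 * For example, the rule's user-visible ip4dst/pdst are filled below
+	 * from rule->src_ip[0] and rule->src_port.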
+ */ + fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0]; + + if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->q_index; + + if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { + struct i40e_vsi *vsi; + + vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); + if (vsi && vsi->type == I40E_VSI_SRIOV) { + fsp->h_ext.data[1] = htonl(vsi->vf_id); + fsp->m_ext.data[1] = htonl(0x1); + } + } + + return 0; +} + +/** + * i40e_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the command is supported. + **/ +static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vsi->alloc_queue_pairs; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = i40e_get_rss_hash_opts(pf, cmd); + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = pf->fdir_pf_active_filters; + /* report total rule count */ + cmd->data = i40e_get_fd_cnt_all(pf); + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = i40e_get_ethtool_fdir_entry(pf, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); + break; + default: + break; + } + + return ret; +} + +/** + * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash + * @pf: pointer to the physical function struct + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. 
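+ *
+ * Usage sketch (the interface name is an example only): this handler
+ * services requests such as
+ *   ethtool -N eth0 rx-flow-hash udp4 sdfn
+ * which asks for hashing on IP src/dst plus the UDP source and destination
+ * ports.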
+ **/ +static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) +{ + struct i40e_hw *hw = &pf->hw; + u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | + ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + break; + default: + return -EINVAL; + } + break; + case TCP_V6_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + break; + default: + return -EINVAL; + } + break; + case UDP_V4_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + break; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + break; + case IPV4_FLOW: + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + break; + case IPV6_FLOW: + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + break; + default: + return -EINVAL; + } + + wr32(hw, I40E_PFQF_HENA(0), (u32)hena); + wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + i40e_flush(hw); + + /* Save setting for future output/update */ + pf->vsi[pf->lan_vsi]->rxnfc = *nfc; + + return 0; +} + +/** + * i40e_match_fdir_input_set - Match a new filter against an existing one + * @rule: The filter already added + * @input: The new filter to comapre against + * + * Returns true if the two input set match + **/ +static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, + struct i40e_fdir_filter *input) +{ + if ((rule->dst_ip[0] != input->dst_ip[0]) || + (rule->src_ip[0] != input->src_ip[0]) || + (rule->dst_port != input->dst_port) || + (rule->src_port != input->src_port)) + return false; + return true; +} + +/** + * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry + * @vsi: 
Pointer to the targeted VSI + * @input: The filter to update or NULL to indicate deletion + * @sw_idx: Software index to the filter + * @cmd: The command to get or set Rx flow classification rules + * + * This function updates (or deletes) a Flow Director entry from + * the hlist of the corresponding PF + * + * Returns 0 on success + **/ +static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, + u16 sw_idx, + struct ethtool_rxnfc *cmd) +{ + struct i40e_fdir_filter *rule, *parent; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node2; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->fd_id >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->fd_id == sw_idx)) { + if (input && !i40e_match_fdir_input_set(rule, input)) + err = i40e_add_del_fdir(vsi, rule, false); + else if (!input) + err = i40e_add_del_fdir(vsi, rule, false); + hlist_del(&rule->fdir_node); + kfree(rule); + pf->fdir_pf_active_filters--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, + &pf->fdir_filter_list); + + /* update counts */ + pf->fdir_pf_active_filters++; + + return 0; +} + +/** + * i40e_del_fdir_entry - Deletes a Flow Director filter entry + * @vsi: Pointer to the targeted VSI + * @cmd: The command to get or set Rx flow classification rules + * + * The function removes a Flow Director filter entry from the + * hlist of the corresponding PF + * + * Returns 0 on success + */ +static int i40e_del_fdir_entry(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_pf *pf = vsi->back; + int ret = 0; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); + + i40e_fdir_check_and_reenable(pf); + return ret; +} + +/** + * i40e_add_fdir_ethtool - Add/Remove Flow Director filters + * @vsi: pointer to the targeted VSI + * @cmd: command to get or set RX flow classification rules + * + * Add Flow Director filters for a specific flow spec based on their + * protocol. Returns 0 if the filters were successfully added. 
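+ *
+ * Usage sketch (interface, address, port, queue and location values are
+ * examples only): a rule added with
+ *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 5001 \
+ *           action 2 loc 4
+ * reaches this function as an ETHTOOL_SRXCLSRLINS request with
+ * fsp->location == 4 and fsp->ring_cookie == 2.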
+ **/ +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp; + struct i40e_fdir_filter *input; + struct i40e_pf *pf; + int ret = -EINVAL; + u16 vf_id; + + if (!vsi) + return -EINVAL; + + pf = vsi->back; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return -EOPNOTSUPP; + + if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) + return -ENOSPC; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + + pf->hw.func_caps.fd_filters_guaranteed)) { + return -EINVAL; + } + + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= vsi->num_queue_pairs)) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + + if (!input) + return -ENOMEM; + + input->fd_id = fsp->location; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else + input->dest_ctl = + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + + input->q_index = fsp->ring_cookie; + input->flex_off = 0; + input->pctype = 0; + input->dest_vsi = vsi->id; + input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; + input->cnt_index = pf->fd_sb_cnt_idx; + input->flow_type = fsp->flow_type; + input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; + + /* Reverse the src and dest notion, since the HW expects them to be from + * Tx perspective where as the input from user is from Rx filter view. + */ + input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; + input->src_port = fsp->h_u.tcp_ip4_spec.pdst; + input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + + if (ntohl(fsp->m_ext.data[1])) { + if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { + netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); + goto free_input; + } + vf_id = ntohl(fsp->h_ext.data[1]); + /* Find vsi id from vf id and override dest vsi */ + input->dest_vsi = pf->vf[vf_id].lan_vsi_id; + if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { + netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); + goto free_input; + } + } + + ret = i40e_add_del_fdir(vsi, input, true); +free_input: + if (ret) + kfree(input); + else + i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); + + return ret; +} + +/** + * i40e_set_rxnfc - command to set RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the command is supported. + **/ +static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = i40e_set_rss_hash_opt(pf, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = i40e_add_fdir_ethtool(vsi, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = i40e_del_fdir_entry(vsi, cmd); + break; + default: + break; + } + + return ret; +} + +/** + * i40e_max_channels - get Max number of combined channels supported + * @vsi: vsi pointer + **/ +static unsigned int i40e_max_channels(struct i40e_vsi *vsi) +{ + /* TODO: This code assumes DCB and FD is disabled for now. 
*/ + return vsi->alloc_queue_pairs; +} + +/** + * i40e_get_channels - Get the current channels enabled and max supported etc. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * We don't support separate tx and rx queues as channels. The other count + * represents how many queues are being used for control. max_combined counts + * how many queue pairs we can support. They may not be mapped 1 to 1 with + * q_vectors since we support a lot more queue pairs than q_vectors. + **/ +static void i40e_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + /* report maximum channels */ + ch->max_combined = i40e_max_channels(vsi); + + /* report info for other vector */ + ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; + ch->max_other = ch->other_count; + + /* Note: This code assumes DCB is disabled for now. */ + ch->combined_count = vsi->num_queue_pairs; +} + +/** + * i40e_set_channels - Set the new channels count. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * The new channels count may not be the same as requested by the user + * since it gets rounded down to a power of 2 value. + **/ +static int i40e_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + unsigned int count = ch->combined_count; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int new_count; + + /* We do not support setting channels for any other VSI at present */ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0)) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > i40e_max_channels(vsi)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + /* TODO: Flow director limit, DCB etc */ + + /* use rss_reconfig to rebuild with new queue count and update traffic + * class queue mapping + */ + new_count = i40e_reconfig_rss_queues(pf, count); + if (new_count > 0) + return 0; + else + return -EINVAL; +} + +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +/** + * i40e_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_key_size(struct net_device *netdev) +{ + return I40E_HKEY_ARRAY_SIZE; +} + +/** + * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. 
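+ * (I40E_HLUT_ARRAY_SIZE entries: four one-byte LUT entries are packed into
+ * each 32-bit I40E_PFQF_HLUT register and unpacked by i40e_get_rxfh() below.)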
+ **/ +static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) +{ + return I40E_HLUT_ARRAY_SIZE; +} + +static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HLUT(i)); + indir[j++] = reg_val & 0xff; + indir[j++] = (reg_val >> 8) & 0xff; + indir[j++] = (reg_val >> 16) & 0xff; + indir[j++] = (reg_val >> 24) & 0xff; + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HKEY(i)); + key[j++] = (u8)(reg_val & 0xff); + key[j++] = (u8)((reg_val >> 8) & 0xff); + key[j++] = (u8)((reg_val >> 16) & 0xff); + key[j++] = (u8)((reg_val >> 24) & 0xff); + } + } + return 0; +} + +/** + * i40e_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * + * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * returns 0 after programming the table. + **/ +static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = indir[j++]; + reg_val |= indir[j++] << 8; + reg_val |= indir[j++] << 16; + reg_val |= indir[j++] << 24; + wr32(hw, I40E_PFQF_HLUT(i), reg_val); + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = key[j++]; + reg_val |= key[j++] << 8; + reg_val |= key[j++] << 16; + reg_val |= key[j++] << 24; + wr32(hw, I40E_PFQF_HKEY(i), reg_val); + } + } + return 0; +} + +/** + * i40e_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40e_get_priv_flags(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 ret_flags = 0; + + ret_flags |= pf->hw.func_caps.npar_enable ? 
+ I40E_PRIV_FLAGS_NPAR_FLAG : 0; + + return ret_flags; +} + +static const struct ethtool_ops i40e_ethtool_ops = { + .get_settings = i40e_get_settings, + .set_settings = i40e_set_settings, + .get_drvinfo = i40e_get_drvinfo, + .get_regs_len = i40e_get_regs_len, + .get_regs = i40e_get_regs, + .nway_reset = i40e_nway_reset, + .get_link = ethtool_op_get_link, + .get_wol = i40e_get_wol, + .set_wol = i40e_set_wol, + .set_eeprom = i40e_set_eeprom, + .get_eeprom_len = i40e_get_eeprom_len, + .get_eeprom = i40e_get_eeprom, + .get_ringparam = i40e_get_ringparam, + .set_ringparam = i40e_set_ringparam, + .get_pauseparam = i40e_get_pauseparam, + .set_pauseparam = i40e_set_pauseparam, + .get_msglevel = i40e_get_msglevel, + .set_msglevel = i40e_set_msglevel, + .get_rxnfc = i40e_get_rxnfc, + .set_rxnfc = i40e_set_rxnfc, + .self_test = i40e_diag_test, + .get_strings = i40e_get_strings, + .set_phys_id = i40e_set_phys_id, + .get_sset_count = i40e_get_sset_count, + .get_ethtool_stats = i40e_get_ethtool_stats, + .get_coalesce = i40e_get_coalesce, + .set_coalesce = i40e_set_coalesce, + .get_rxfh_key_size = i40e_get_rxfh_key_size, + .get_rxfh_indir_size = i40e_get_rxfh_indir_size, + .get_rxfh = i40e_get_rxfh, + .set_rxfh = i40e_set_rxfh, + .get_channels = i40e_get_channels, + .set_channels = i40e_set_channels, + .get_ts_info = i40e_get_ts_info, + .get_priv_flags = i40e_get_priv_flags, +}; + +void i40e_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &i40e_ethtool_ops; +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c new file mode 100644 index 000000000..1803afeef --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -0,0 +1,1565 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e.h" +#include "i40e_fcoe.h" + +/** + * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE + * @ptype: the packet type field from rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); +} + +/** + * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF + * @sof: the FCoE start of frame delimiter + **/ +static inline bool i40e_fcoe_sof_is_class2(u8 sof) +{ + return (sof == FC_SOF_I2) || (sof == FC_SOF_N2); +} + +/** + * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF + * @sof: the FCoE start of frame delimiter + **/ +static inline bool i40e_fcoe_sof_is_class3(u8 sof) +{ + return (sof == FC_SOF_I3) || (sof == FC_SOF_N3); +} + +/** + * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW + * @sof: the input SOF value from the frame + **/ +static inline bool i40e_fcoe_sof_is_supported(u8 sof) +{ + return i40e_fcoe_sof_is_class2(sof) || + i40e_fcoe_sof_is_class3(sof); +} + +/** + * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame + * @skb: the frame whose EOF is to be pulled from + **/ +static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof) +{ + *sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + + if (!i40e_fcoe_sof_is_supported(*sof)) + return -EINVAL; + return 0; +} + +/** + * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW + * @eof: the input EOF value from the frame + **/ +static inline bool i40e_fcoe_eof_is_supported(u8 eof) +{ + return (eof == FC_EOF_N) || (eof == FC_EOF_T) || + (eof == FC_EOF_NI) || (eof == FC_EOF_A); +} + +/** + * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame + * @skb: the frame whose EOF is to be pulled from + **/ +static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof) +{ + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, eof, 1); + + if (!i40e_fcoe_eof_is_supported(*eof)) + return -EINVAL; + return 0; +} + +/** + * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming + * @eof: the input eof value from the frame + * + * The FC EOF is converted to the value understood by HW for descriptor + * programming. Never call this w/o calling i40e_fcoe_eof_is_supported() + * first. 
+ **/ +static inline u32 i40e_fcoe_ctxt_eof(u8 eof) +{ + switch (eof) { + case FC_EOF_N: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N; + case FC_EOF_T: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T; + case FC_EOF_NI: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI; + case FC_EOF_A: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A; + default: + /* FIXME: still returns 0 */ + pr_err("Unrecognized EOF %x\n", eof); + return 0; + } +} + +/** + * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid + * @xid: the exchange id + **/ +static inline bool i40e_fcoe_xid_is_valid(u16 xid) +{ + return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX); +} + +/** + * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated + * @pf: pointer to PF + * @ddp: sw DDP context + * + * Unmap the scatter-gather list associated with the given SW DDP context + * + * Returns: data length already ddp-ed in bytes + * + **/ +static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf, + struct i40e_fcoe_ddp *ddp) +{ + if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags)) + return; + + if (ddp->sgl) { + dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + ddp->sgl = NULL; + ddp->sgc = 0; + } + + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ddp->pool = NULL; + } +} + +/** + * i40e_fcoe_ddp_clear - clear the given SW DDP context + * @ddp - SW DDP context + **/ +static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp) +{ + memset(ddp, 0, sizeof(struct i40e_fcoe_ddp)); + ddp->xid = FC_XID_UNKNOWN; + ddp->flags = __I40E_FCOE_DDP_NONE; +} + +/** + * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE + * @id: the prog id for the programming status Rx descriptor write-back + **/ +static inline bool i40e_fcoe_progid_is_fcoe(u8 id) +{ + return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) || + (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS); +} + +/** + * i40e_fcoe_fc_get_xid - get xid from the frame header + * @fh: the fc frame header + * + * In case the incoming frame's exchange is originated from + * the initiator, then received frame's exchange id is ANDed + * with fc_cpu_mask bits to get the same cpu on which exchange + * was originated, otherwise just use the current cpu. + * + * Returns ox_id if exchange originator, rx_id if responder + **/ +static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh) +{ + u32 f_ctl = ntoh24(fh->fh_f_ctl); + + return (f_ctl & FC_FC_EX_CTX) ? + be16_to_cpu(fh->fh_ox_id) : + be16_to_cpu(fh->fh_rx_id); +} + +/** + * i40e_fcoe_fc_frame_header - get fc frame header from skb + * @skb: packet + * + * This checks if there is a VLAN header and returns the data + * pointer to the start of the fc_frame_header. + * + * Returns pointer to the fc_frame_header + **/ +static inline struct fc_frame_header *i40e_fcoe_fc_frame_header( + struct sk_buff *skb) +{ + void *fh = skb->data + sizeof(struct fcoe_hdr); + + if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) + fh += sizeof(struct vlan_hdr); + + return (struct fc_frame_header *)fh; +} + +/** + * i40e_fcoe_ddp_put - release the DDP context for a given exchange id + * @netdev: the corresponding net_device + * @xid: the exchange id that corresponding DDP context will be released + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. 
+ * + * Returns : data length already ddp-ed in bytes + **/ +static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + int len = 0; + struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid]; + + if (!fcoe || !ddp) + goto out; + + if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags)) + len = ddp->len; + i40e_fcoe_ddp_unmap(pf, ddp); +out: + return len; +} + +/** + * i40e_fcoe_sw_init - sets up the HW for FCoE + * @pf: pointer to PF + * + * Returns 0 if FCoE is supported otherwise the error code + **/ +int i40e_init_pf_fcoe(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 val; + + pf->flags &= ~I40E_FLAG_FCOE_ENABLED; + pf->num_fcoe_qps = 0; + pf->fcoe_hmc_cntx_num = 0; + pf->fcoe_hmc_filt_num = 0; + + if (!pf->hw.func_caps.fcoe) { + dev_info(&pf->pdev->dev, "FCoE capability is disabled\n"); + return 0; + } + + if (!pf->hw.func_caps.dcb) { + dev_warn(&pf->pdev->dev, + "Hardware is not DCB capable not enabling FCoE.\n"); + return 0; + } + + /* enable FCoE hash filter */ + val = rd32(hw, I40E_PFQF_HENA(1)); + val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32); + val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32); + val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; + wr32(hw, I40E_PFQF_HENA(1), val); + + /* enable flag */ + pf->flags |= I40E_FLAG_FCOE_ENABLED; + pf->num_fcoe_qps = I40E_DEFAULT_FCOE; + + /* Reserve 4K DDP contexts and 20K filter size for FCoE */ + pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * + I40E_DMA_CNTX_BASE_SIZE; + pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + + (1 << I40E_HASH_FILTER_SIZE_16K) * + I40E_HASH_FILTER_BASE_SIZE; + + /* FCoE object: max 16K filter buckets and 4K DMA contexts */ + pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K; + pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; + + /* Setup max frame with FCoE_MTU plus L2 overheads */ + val = rd32(hw, I40E_GLFCOE_RCTL); + val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK; + val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) + << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT); + wr32(hw, I40E_GLFCOE_RCTL, val); + + dev_info(&pf->pdev->dev, "FCoE is supported.\n"); + return 0; +} + +/** + * i40e_get_fcoe_tc_map - Return TC map for FCoE APP + * @pf: pointer to PF + * + **/ +u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) +{ + struct i40e_dcb_app_priority_table app; + struct i40e_hw *hw = &pf->hw; + u8 enabled_tc = 0; + u8 tc, i; + /* Get the FCoE APP TLV */ + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + for (i = 0; i < dcbcfg->numapps; i++) { + app = dcbcfg->app[i]; + if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app.protocolid == ETH_P_FCOE) { + tc = dcbcfg->etscfg.prioritytable[app.priority]; + enabled_tc |= (1 << tc); + break; + } + } + + /* TC0 if there is no TC defined for FCoE APP TLV */ + enabled_tc = enabled_tc ? 
enabled_tc : 0x1; + + return enabled_tc; +} + +/** + * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI + * @vsi: pointer to the associated VSI struct + * @ctxt: pointer to the associated VSI context to be passed to HW + * + * Returns 0 on success or < 0 on error + **/ +int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) +{ + struct i40e_aqc_vsi_properties_data *info = &ctxt->info; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u8 enabled_tc = 0; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { + dev_err(&pf->pdev->dev, + "FCoE is not enabled for this device\n"); + return -EPERM; + } + + /* initialize the hardware for FCoE */ + ctxt->pf_num = hw->pf_id; + ctxt->vf_num = 0; + ctxt->uplink_seid = vsi->uplink_seid; + ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt->flags = I40E_AQ_VSI_TYPE_PF; + + /* FCoE VSI would need the following sections */ + info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + + /* FCoE VSI does not need these sections */ + info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | + I40E_AQ_VSI_PROP_VLAN_VALID | + I40E_AQ_VSI_PROP_CAS_PV_VALID | + I40E_AQ_VSI_PROP_INGRESS_UP_VALID | + I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); + + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + info->valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + info->switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + enabled_tc = i40e_get_fcoe_tc_map(pf); + i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); + + /* set up queue option section: only enable FCoE */ + info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA; + + return 0; +} + +/** + * i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable, + * indicating the upper FCoE protocol stack is ready to use FCoE + * offload features. + * + * @netdev: pointer to the netdev that FCoE is created on + * + * Returns 0 on success + * + * in RTNL + * + **/ +int i40e_fcoe_enable(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { + netdev_err(netdev, "HW does not support FCoE.\n"); + return -ENODEV; + } + + if (vsi->type != I40E_VSI_FCOE) { + netdev_err(netdev, "interface does not support FCoE.\n"); + return -EBUSY; + } + + atomic_inc(&fcoe->refcnt); + + return 0; +} + +/** + * i40e_fcoe_disable- disables FCoE for upper FCoE protocol stack. 
+ * @dev: pointer to the netdev that FCoE is created on + * + * Returns 0 on success + * + **/ +int i40e_fcoe_disable(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { + netdev_err(netdev, "device does not support FCoE\n"); + return -ENODEV; + } + if (vsi->type != I40E_VSI_FCOE) + return -EBUSY; + + if (!atomic_dec_and_test(&fcoe->refcnt)) + return -EINVAL; + + netdev_info(netdev, "FCoE disabled\n"); + + return 0; +} + +/** + * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP + * @fcoe: the FCoE sw object + * @dev: the device that the pool is associated with + * @cpu: the cpu for this pool + * + **/ +static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct i40e_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (!ddp_pool->pool) { + dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu); + return; + } + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + +/** + * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP + * @fcoe: the FCoE sw object + * @dev: the device that the pool is associated with + * @cpu: the cpu for this pool + * + * Returns 0 on successful or non zero on failure + * + **/ +static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct i40e_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (ddp_pool && ddp_pool->pool) { + dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu); + return 0; + } + snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu); + pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX, + I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE); + if (!pool) { + dev_err(dev, "dma_pool_create %s failed\n", pool_name); + return -ENOMEM; + } + ddp_pool->pool = pool; + return 0; +} + +/** + * i40e_fcoe_free_ddp_resources - release FCoE DDP resources + * @vsi: the vsi FCoE is associated with + * + **/ +void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + int cpu, i; + + /* do nothing if not FCoE VSI */ + if (vsi->type != I40E_VSI_FCOE) + return; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + + for (i = 0; i < I40E_FCOE_DDP_MAX; i++) + i40e_fcoe_ddp_put(vsi->netdev, i); + + for_each_possible_cpu(cpu) + i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu); + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; + + netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n", + vsi->id, vsi->seid); +} + +/** + * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources + * @vsi: the VSI FCoE is associated with + * + * Returns 0 on successful or non zero on failure + * + **/ +int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct device *dev = &pf->pdev->dev; + struct i40e_fcoe *fcoe = &pf->fcoe; + unsigned int cpu; + int i; + + if (vsi->type != I40E_VSI_FCOE) + return -ENODEV; + + /* do nothing if no DDP pools were allocated */ + if (fcoe->ddp_pool) + return -EEXIST; + + /* allocate per CPU memory to track DDP pools */ + fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool); + if (!fcoe->ddp_pool) { + dev_err(&pf->pdev->dev, "failed to allocate percpu 
DDP\n"); + return -ENOMEM; + } + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu)) + continue; + + dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu); + i40e_fcoe_free_ddp_resources(vsi); + return -ENOMEM; + } + + /* initialize the sw context */ + for (i = 0; i < I40E_FCOE_DDP_MAX; i++) + i40e_fcoe_ddp_clear(&fcoe->ddp[i]); + + netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n", + vsi->id, vsi->seid); + + return 0; +} + +/** + * i40e_fcoe_handle_status - check the Programming Status for FCoE + * @rx_ring: the Rx ring for this descriptor + * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor. + * + * Check if this is the Rx Programming Status descriptor write-back for FCoE. + * This is used to verify if the context/filter programming or invalidation + * requested by SW to the HW is successful or not and take actions accordingly. + **/ +void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) +{ + struct i40e_pf *pf = rx_ring->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + struct i40e_fcoe_ddp *ddp; + u32 error; + u16 xid; + u64 qw; + + /* we only care for FCoE here */ + if (!i40e_fcoe_progid_is_fcoe(prog_id)) + return; + + xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) & + (I40E_FCOE_DDP_MAX - 1); + + if (!i40e_fcoe_xid_is_valid(xid)) + return; + + ddp = &fcoe->ddp[xid]; + WARN_ON(xid != ddp->xid); + + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + + /* DDP context programming status: failure or success */ + if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) { + if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) { + dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n", + xid, ddp->xid); + ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT; + } + if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) { + dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n", + xid, ddp->xid); + ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT; + } + } + + /* DDP context invalidation status: failure or success */ + if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) { + if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) { + dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n", + xid, ddp->xid); + ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT; + } + /* clear the flag so we can retry invalidation */ + clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags); + } + + /* unmap DMA */ + i40e_fcoe_ddp_unmap(pf, ddp); + i40e_fcoe_ddp_clear(ddp); +} + +/** + * i40e_fcoe_handle_offload - check ddp status and mark it done + * @adapter: i40e adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. + * + * Returns : < 0 indicates an error or not a FCOE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. 
+ * + **/ +int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct i40e_pf *pf = rx_ring->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + struct fc_frame_header *fh = NULL; + struct i40e_fcoe_ddp *ddp = NULL; + u32 status, fltstat; + u32 error, fcerr; + int rc = -EINVAL; + u16 ptype; + u16 xid; + u64 qw; + + /* check this rxd is for programming status */ + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + /* packet descriptor, check packet type */ + ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + if (!i40e_rx_is_fcoe(ptype)) + goto out_no_ddp; + + error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; + fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) & + I40E_RX_DESC_FCOE_ERROR_MASK; + + /* check stateless offload error */ + if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) { + dev_err(&pf->pdev->dev, "Protocol Error\n"); + skb->ip_summed = CHECKSUM_NONE; + } else { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + /* check hw status on ddp */ + status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; + fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & + I40E_RX_DESC_FLTSTAT_FCMASK; + + /* now we are ready to check DDP */ + fh = i40e_fcoe_fc_frame_header(skb); + xid = i40e_fcoe_fc_get_xid(fh); + if (!i40e_fcoe_xid_is_valid(xid)) + goto out_no_ddp; + + /* non DDP normal receive, return to the protocol stack */ + if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH) + goto out_no_ddp; + + /* do we have a sw ddp context setup ? */ + ddp = &fcoe->ddp[xid]; + if (!ddp->sgl) + goto out_no_ddp; + + /* fetch xid from hw rxd wb, which should match up the sw ctxt */ + xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id); + if (ddp->xid != xid) { + dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n", + ddp->xid, xid); + goto out_put_ddp; + } + + /* the same exchange has already errored out */ + if (ddp->fcerr) { + dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n", + xid, ddp->fcerr, fcerr); + goto out_put_ddp; + } + + /* fcoe param is valid by now with correct DDPed length */ + ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param); + ddp->fcerr = fcerr; + /* header posting only, useful only for target mode and debugging */ + if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) { + /* For target mode, we get header of the last packet but it + * does not have the FCoE trailer field, i.e., CRC and EOF + * Ordered Set since they are offloaded by the HW, so fill + * it up correspondingly to allow the packet to pass through + * to the upper protocol stack. 
+ */ + u32 f_ctl = ntoh24(fh->fh_f_ctl); + + if ((f_ctl & FC_FC_END_SEQ) && + (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) { + struct fcoe_crc_eof *crc = NULL; + + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } else { + /* otherwise, drop the header only frame */ + rc = 0; + goto out_no_ddp; + } + } + +out_put_ddp: + /* either we got RSP or we have an error, unmap DMA in both cases */ + i40e_fcoe_ddp_unmap(pf, ddp); + if (ddp->len && !ddp->fcerr) { + int pkts; + + rc = ddp->len; + i40e_fcoe_ddp_clear(ddp); + ddp->len = rc; + pkts = DIV_ROUND_UP(rc, 2048); + rx_ring->stats.bytes += rc; + rx_ring->stats.packets += pkts; + rx_ring->q_vector->rx.total_bytes += rc; + rx_ring->q_vector->rx.total_packets += pkts; + set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags); + } + +out_no_ddp: + return rc; +} + +/** + * i40e_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * @target_mode: indicates this is a DDP request for target + * + * Returns : 1 for success and 0 for no DDP on this I/O + **/ +static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) +{ + static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_fcoe_ddp_pool *ddp_pool; + struct i40e_pf *pf = np->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + unsigned int i, j, dmacount; + struct i40e_fcoe_ddp *ddp; + unsigned int firstoff = 0; + unsigned int thisoff = 0; + unsigned int thislen = 0; + struct scatterlist *sg; + dma_addr_t addr = 0; + unsigned int len; + + if (xid >= I40E_FCOE_DDP_MAX) { + dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid); + return 0; + } + + /* no DDP if we are already down or resetting */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_NEEDS_RESTART, &pf->state)) { + dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n", + xid); + return 0; + } + + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + i40e_fcoe_ddp_clear(ddp); + + if (!fcoe->ddp_pool) { + dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid); + goto out_noddp; + } + + /* setup dma from scsi command sgl */ + dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + if (dmacount == 0) { + dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n", + sgl, sgc); + goto out_noddp_unmap; + } + + /* alloc the udl from our ddp pool */ + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + dev_info(&pf->pdev->dev, + "Failed allocated ddp context, xid 0x%x\n", xid); + goto out_noddp_unmap; + } + + j = 0; + ddp->len = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + ddp->len += len; + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) { + dev_info(&pf->pdev->dev, + "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + /* get the offset of length of current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + 
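/* Note: thisoff is the offset of this SG element within a
+			 * bufflen-sized DDP buffer; at most (bufflen - thisoff)
+			 * bytes of it can be placed in the current buffer, which
+			 * is what thislen computes next.
+			 */
+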
thislen = min_t(unsigned int, (bufflen - thisoff), len); + /* all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if ((j != 0) && (thisoff)) + goto out_noddp_free; + + /* all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if (((i != (dmacount - 1)) || (thislen != len)) && + ((thislen + thisoff) != bufflen)) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have none-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer may have non-full bufflen */ + ddp->lastsize = thisoff + thislen; + ddp->firstoff = firstoff; + ddp->list_len = j; + ddp->pool = ddp_pool->pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + ddp->xid = xid; + if (target_mode) + set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags); + set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags); + + put_cpu(); + return 1; /* Success */ + +out_noddp_free: + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + i40e_fcoe_ddp_clear(ddp); + +out_noddp_unmap: + dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp: + put_cpu(); + return 0; +} + +/** + * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. + * + * Returns : 1 for success and 0 for no ddp + **/ +static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * i40e_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + **/ +static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** + * i40e_fcoe_program_ddp - programs the HW DDP related descriptors + * @tx_ring: transmit ring for this packet + * @skb: the packet to be sent out + * @sof: the SOF to indicate class of service + * + * Determine if it is READ/WRITE command, and finds out if there is + * a matching SW DDP context for this command. DDP is applicable + * only in case of READ if initiator or WRITE in case of + * responder (via checking XFER_RDY). 
+ * + * Note: caller checks sof and ddp sw context + * + * Returns : none + * + **/ +static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring, + struct sk_buff *skb, + struct i40e_fcoe_ddp *ddp, u8 sof) +{ + struct i40e_fcoe_filter_context_desc *filter_desc = NULL; + struct i40e_fcoe_queue_context_desc *queue_desc = NULL; + struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL; + struct i40e_pf *pf = tx_ring->vsi->back; + u16 i = tx_ring->next_to_use; + struct fc_frame_header *fh; + u64 flags_rsvd_lanq = 0; + bool target_mode; + + /* check if abort is still pending */ + if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) { + dev_warn(&pf->pdev->dev, + "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n", + ddp->xid, ddp->flags); + return; + } + + /* set the flag to indicate this is programmed */ + if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) { + dev_warn(&pf->pdev->dev, + "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n", + ddp->xid, ddp->flags); + return; + } + + /* Prepare the DDP context descriptor */ + ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i); + i++; + if (i == tx_ring->count) + i = 0; + + ddp_desc->type_cmd_foff_lsize = + cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX | + ((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K << + I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) | + ((u64)ddp->firstoff << + I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) | + ((u64)ddp->lastsize << + I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)); + ddp_desc->rsvd = cpu_to_le64(0); + + /* target mode needs last packet in the sequence */ + target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags); + if (target_mode) + ddp_desc->type_cmd_foff_lsize |= + cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH); + + /* Prepare queue_context descriptor */ + queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++); + if (i == tx_ring->count) + i = 0; + queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp)); + queue_desc->flen_tph = cpu_to_le64(ddp->list_len | + ((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC | + I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) << + I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)); + + /* Prepare filter_context_desc */ + filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i); + i++; + if (i == tx_ring->count) + i = 0; + + fh = (struct fc_frame_header *)skb_transport_header(skb); + filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset)); + filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt)); + filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid << + I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT); + + flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP; + flags_rsvd_lanq |= (u64)(target_mode ? + I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP : + I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT); + + flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ? + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 : + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3); + + flags_rsvd_lanq |= ((u64)skb->queue_mapping << + I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT); + filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq); + + /* By this time, all offload related descriptors has been programmed */ + tx_ring->next_to_use = i; +} + +/** + * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort + * @tx_ring: transmit ring for this packet + * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS + * @ddp: the SW DDP context for this DDP + * + * Programs the Tx context descriptor to do DDP invalidation. 
+ **/ +static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring, + struct sk_buff *skb, + struct i40e_fcoe_ddp *ddp) +{ + struct i40e_tx_context_desc *context_desc; + int i; + + if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) + return; + + i = tx_ring->next_to_use; + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + i++; + if (i == tx_ring->count) + i = 0; + + context_desc->tunneling_params = cpu_to_le32(0); + context_desc->l2tag2 = cpu_to_le16(0); + context_desc->rsvd = cpu_to_le16(0); + context_desc->type_cmd_tso_mss = cpu_to_le64( + I40E_TX_DESC_DTYPE_FCOE_CTX | + (I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL << + I40E_TXD_CTX_QW1_CMD_SHIFT) | + (I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND << + I40E_TXD_CTX_QW1_CMD_SHIFT)); + tx_ring->next_to_use = i; +} + +/** + * i40e_fcoe_handle_ddp - check we should setup or invalidate DDP + * @tx_ring: transmit ring for this packet + * @skb: the packet to be sent out + * @sof: the SOF to indicate class of service + * + * Determine if it is ABTS/READ/XFER_RDY, and finds out if there is + * a matching SW DDP context for this command. DDP is applicable + * only in case of READ if initiator or WRITE in case of + * responder (via checking XFER_RDY). In case this is an ABTS, send + * just invalidate the context. + **/ +static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring, + struct sk_buff *skb, u8 sof) +{ + struct i40e_pf *pf = tx_ring->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + struct fc_frame_header *fh; + struct i40e_fcoe_ddp *ddp; + u32 f_ctl; + u8 r_ctl; + u16 xid; + + fh = (struct fc_frame_header *)skb_transport_header(skb); + f_ctl = ntoh24(fh->fh_f_ctl); + r_ctl = fh->fh_r_ctl; + ddp = NULL; + + if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) { + /* exchange responder? if so, XFER_RDY for write */ + xid = ntohs(fh->fh_rx_id); + if (i40e_fcoe_xid_is_valid(xid)) { + ddp = &fcoe->ddp[xid]; + if ((ddp->xid == xid) && + (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) + i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof); + } + } else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) { + /* exchange originator, check READ cmd */ + xid = ntohs(fh->fh_ox_id); + if (i40e_fcoe_xid_is_valid(xid)) { + ddp = &fcoe->ddp[xid]; + if ((ddp->xid == xid) && + (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) + i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof); + } + } else if (r_ctl == FC_RCTL_BA_ABTS) { + /* exchange originator, check ABTS */ + xid = ntohs(fh->fh_ox_id); + if (i40e_fcoe_xid_is_valid(xid)) { + ddp = &fcoe->ddp[xid]; + if ((ddp->xid == xid) && + (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) + i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp); + } + } +} + +/** + * i40e_fcoe_tso - set up FCoE TSO + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: the tso header length + * @sof: the SOF to indicate class of service + * + * Note must already have sof checked to be either class 2 or class 3 before + * calling this function. + * + * Returns 1 to indicate sequence segmentation offload is properly setup + * or returns 0 to indicate no tso is needed, otherwise returns error + * code to drop the frame. 
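+ *
+ * As a rough worked example (sizes assumed from the usual FC/FCoE
+ * encapsulation: 14-byte Ethernet header, 14-byte FCoE header, 24-byte FC
+ * frame header and 8-byte FCoE CRC/EOF trailer), an untagged frame has its
+ * transport header at offset 14 + 14 = 28, so *hdr_len works out to
+ * 28 + 24 + 8 = 60 and cd_tso_len = skb->len - 60; only the FC payload
+ * beyond that is segmented in gso_size sized chunks, with headers and
+ * trailer re-inserted by hardware for each resulting frame.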
+ **/ +static int i40e_fcoe_tso(struct i40e_ring *tx_ring, + struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len, u8 sof) +{ + struct i40e_tx_context_desc *context_desc; + u32 cd_type, cd_cmd, cd_tso_len, cd_mss; + struct fc_frame_header *fh; + u64 cd_type_cmd_tso_mss; + + /* must match gso type as FCoE */ + if (!skb_is_gso(skb)) + return 0; + + /* is it the expected gso type for FCoE ?*/ + if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { + netdev_err(skb->dev, + "wrong gso type %d:expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); + return -EINVAL; + } + + /* header and trailer are inserted by hw */ + *hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) + + sizeof(struct fcoe_crc_eof); + + /* check sof to decide a class 2 or 3 TSO */ + if (likely(i40e_fcoe_sof_is_class3(sof))) + cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3; + else + cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2; + + /* param field valid? */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF; + + /* fill the field values */ + cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX; + cd_tso_len = skb->len - *hdr_len; + cd_mss = skb_shinfo(skb)->gso_size; + cd_type_cmd_tso_mss = + ((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | + ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + + /* grab the next descriptor */ + context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); + tx_ring->next_to_use++; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + + context_desc->tunneling_params = 0; + context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK) + >> I40E_TX_FLAGS_VLAN_SHIFT); + context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); + + return 1; +} + +/** + * i40e_fcoe_tx_map - build the tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: ptr to the size of the packet header + * @eof: the frame eof value + * + * Note, for FCoE, sof and eof are already checked + **/ +static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring, + struct sk_buff *skb, + struct i40e_tx_buffer *first, + u32 tx_flags, u8 hdr_len, u8 eof) +{ + u32 td_offset = 0; + u32 td_cmd = 0; + u32 maclen; + + /* insert CRC */ + td_cmd = I40E_TX_DESC_CMD_ICRC; + + /* setup MACLEN */ + maclen = skb_network_offset(skb); + if (tx_flags & I40E_TX_FLAGS_SW_VLAN) + maclen += sizeof(struct vlan_hdr); + + if (skb->protocol == htons(ETH_P_FCOE)) { + /* for FCoE, maclen should exclude ether type */ + maclen -= 2; + /* setup type as FCoE and EOF insertion */ + td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof)); + /* setup FCoELEN and FCLEN */ + td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT) | + ((sizeof(struct fc_frame_header) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)); + /* trim to exclude trailer */ + pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof)); + } + + /* MACLEN is ether header length in words not bytes */ + td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); +} + +/** + * i40e_fcoe_set_skb_header - adjust skb header point for FIP/FCoE/FC + * @skb: the skb to be adjusted + * + * Returns true if this skb is a FCoE/FIP or VLAN carried FCoE/FIP 
and then + * adjusts the skb header pointers correspondingly. Otherwise, returns false. + **/ +static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + skb_reset_mac_header(skb); + skb->mac_len = sizeof(struct ethhdr); + if (protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb); + + protocol = veth->h_vlan_encapsulated_proto; + skb->mac_len += sizeof(struct vlan_hdr); + } + + /* FCoE or FIP only */ + if ((protocol != htons(ETH_P_FIP)) && + (protocol != htons(ETH_P_FCOE))) + return -EINVAL; + + /* set header to L2 of FCoE/FIP */ + skb_set_network_header(skb, skb->mac_len); + if (protocol == htons(ETH_P_FIP)) + return 0; + + /* set header to L3 of FC */ + skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); + return 0; +} + +/** + * i40e_fcoe_xmit_frame - transmit buffer + * @skb: send buffer + * @netdev: the fcoe netdev + * + * Returns 0 if sent, else an error code + **/ +static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(skb->dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; + struct i40e_tx_buffer *first; + u32 tx_flags = 0; + u8 hdr_len = 0; + u8 sof = 0; + u8 eof = 0; + int fso; + + if (i40e_fcoe_set_skb_header(skb)) + goto out_drop; + + if (!i40e_xmit_descriptor_count(skb, tx_ring)) + return NETDEV_TX_BUSY; + + /* prepare the xmit flags */ + if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + + /* FIP is a regular L2 traffic w/o offload */ + if (skb->protocol == htons(ETH_P_FIP)) + goto out_send; + + /* check sof and eof, only supports FC Class 2 or 3 */ + if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) { + netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof); + goto out_drop; + } + + /* always do FCCRC for FCoE */ + tx_flags |= I40E_TX_FLAGS_FCCRC; + + /* check we should do sequence offload */ + fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof); + if (fso < 0) + goto out_drop; + else if (fso) + tx_flags |= I40E_TX_FLAGS_FSO; + else + i40e_fcoe_handle_ddp(tx_ring, skb, sof); + +out_send: + /* send out the packet */ + i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof); + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns error as operation not permitted + * + **/ +static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu) +{ + netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n"); + return -EPERM; +} + +/** + * i40e_fcoe_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * + **/ +static int i40e_fcoe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); + + return 0; +} + +static const struct net_device_ops i40e_fcoe_netdev_ops = { + 
.ndo_open = i40e_open, + .ndo_stop = i40e_close, + .ndo_get_stats64 = i40e_get_netdev_stats_struct, + .ndo_set_rx_mode = i40e_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = i40e_set_mac, + .ndo_change_mtu = i40e_fcoe_change_mtu, + .ndo_do_ioctl = i40e_ioctl, + .ndo_tx_timeout = i40e_tx_timeout, + .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, + .ndo_setup_tc = i40e_setup_tc, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40e_netpoll, +#endif + .ndo_start_xmit = i40e_fcoe_xmit_frame, + .ndo_fcoe_enable = i40e_fcoe_enable, + .ndo_fcoe_disable = i40e_fcoe_disable, + .ndo_fcoe_ddp_setup = i40e_fcoe_ddp_get, + .ndo_fcoe_ddp_done = i40e_fcoe_ddp_put, + .ndo_fcoe_ddp_target = i40e_fcoe_ddp_target, + .ndo_set_features = i40e_fcoe_set_features, +}; + +/* fcoe network device type */ +static struct device_type fcoe_netdev_type = { + .name = "fcoe", +}; + +/** + * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI + * @vsi: pointer to the associated VSI struct + * @ctxt: pointer to the associated VSI context to be passed to HW + * + * Returns 0 on success or < 0 on error + **/ +void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) +{ + struct i40e_hw *hw = &vsi->back->hw; + struct i40e_pf *pf = vsi->back; + + if (vsi->type != I40E_VSI_FCOE) + return; + + netdev->features = (NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + + netdev->vlan_features = netdev->features; + netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1; + netdev->features |= NETIF_F_ALL_FCOE; + netdev->vlan_features |= NETIF_F_ALL_FCOE; + netdev->hw_features |= netdev->features; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); + netdev->mtu = FCOE_MTU; + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); + /* set different dev_port value 1 for FCoE netdev than the default + * zero dev_port value for PF netdev, this helps biosdevname user + * tool to differentiate them correctly while both attached to the + * same PCI function. 
+ */ + netdev->dev_port = 1; + i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); + i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); + i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); + i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); + + /* use san mac */ + ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); + ether_addr_copy(netdev->perm_addr, hw->mac.san_addr); + /* fcoe netdev ops */ + netdev->netdev_ops = &i40e_fcoe_netdev_ops; +} + +/** + * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI + * @pf: the PF that VSI is associated with + * + **/ +void i40e_fcoe_vsi_setup(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi; + u16 seid; + int i; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) + return; + + BUG_ON(!pf->vsi[pf->lan_vsi]); + + for (i = 0; i < pf->num_alloc_vsi; i++) { + vsi = pf->vsi[i]; + if (vsi && vsi->type == I40E_VSI_FCOE) { + dev_warn(&pf->pdev->dev, + "FCoE VSI already created\n"); + return; + } + } + + seid = pf->vsi[pf->lan_vsi]->seid; + vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); + if (vsi) { + dev_dbg(&pf->pdev->dev, + "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n", + vsi->seid, vsi->id, vsi->uplink_seid, seid); + } else { + dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); + } +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h new file mode 100644 index 000000000..0d49e2d15 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -0,0 +1,127 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_FCOE_H_ +#define _I40E_FCOE_H_ + +/* FCoE HW context helper macros */ +#define I40E_DDP_CONTEXT_DESC(R, i) \ + (&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i])) + +#define I40E_QUEUE_CONTEXT_DESC(R, i) \ + (&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i])) + +#define I40E_FILTER_CONTEXT_DESC(R, i) \ + (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i])) + +/* receive queue descriptor filter status for FCoE */ +#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3 +#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */ +#define I40E_RX_DESC_FLTSTAT_NODDP 0x1 /* no ddp due to error */ +#define I40E_RX_DESC_FLTSTAT_DDP 0x2 /* DDPed payload, post header */ +#define I40E_RX_DESC_FLTSTAT_FCPRSP 0x3 /* FCP_RSP */ + +/* receive queue descriptor error codes for FCoE */ +#define I40E_RX_DESC_FCOE_ERROR_MASK \ + (I40E_RX_DESC_ERROR_L3L4E_PROT | \ + I40E_RX_DESC_ERROR_L3L4E_FC | \ + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR | \ + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN) + +/* receive queue descriptor programming error */ +#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) \ + (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1) + +#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) \ + (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) + +#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ + (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) +#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ + (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) + +#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ + I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) +#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT \ + I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT + +/* FCoE DDP related definitions */ +#define I40E_FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ +#define I40E_FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ +#define I40E_FCOE_DDP_BUFFCNT_MAX 512 /* 9 bits bufcnt */ +#define I40E_FCOE_DDP_PTR_ALIGN 16 +#define I40E_FCOE_DDP_PTR_MAX (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define I40E_FCOE_DDP_BUF_MIN 4096 +#define I40E_FCOE_DDP_MAX 2048 +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 + +/* supported netdev features for FCoE */ +#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_FILTER) + +/* DDP context flags */ +enum i40e_fcoe_ddp_flags { + __I40E_FCOE_DDP_NONE = 1, + __I40E_FCOE_DDP_TARGET, + __I40E_FCOE_DDP_INITALIZED, + __I40E_FCOE_DDP_PROGRAMMED, + __I40E_FCOE_DDP_DONE, + __I40E_FCOE_DDP_ABORTED, + __I40E_FCOE_DDP_UNMAPPED, +}; + +/* DDP SW context struct */ +struct i40e_fcoe_ddp { + int len; + u16 xid; + u16 firstoff; + u16 lastsize; + u16 list_len; + u8 fcerr; + u8 prerr; + unsigned long flags; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; + struct dma_pool *pool; + +}; + +struct i40e_fcoe_ddp_pool { + struct dma_pool *pool; +}; + +struct i40e_fcoe { + unsigned long mode; + atomic_t refcnt; + struct i40e_fcoe_ddp_pool __percpu *ddp_pool; + struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX]; +}; + +#endif /* _I40E_FCOE_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c new file mode 100644 index 000000000..9b987ccc9 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -0,0 +1,360 @@ 
+/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_status.h" +#include "i40e_alloc.h" +#include "i40e_hmc.h" +#include "i40e_type.h" + +/** + * i40e_add_sd_table_entry - Adds a segment descriptor to the table + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: segment descriptor index to manipulate + * @type: what type of segment descriptor we're manipulating + * @direct_mode_sz: size to alloc in direct mode + **/ +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz) +{ + enum i40e_memory_type mem_type __attribute__((unused)); + struct i40e_hmc_sd_entry *sd_entry; + bool dma_mem_alloc_done = false; + struct i40e_dma_mem mem; + i40e_status ret_code; + u64 alloc_len; + + if (NULL == hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n"); + goto exit; + } + + if (sd_index >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n"); + goto exit; + } + + sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; + if (!sd_entry->valid) { + if (I40E_SD_TYPE_PAGED == type) { + mem_type = i40e_mem_pd; + alloc_len = I40E_HMC_PAGED_BP_SIZE; + } else { + mem_type = i40e_mem_bp_jumbo; + alloc_len = direct_mode_sz; + } + + /* allocate a 4K pd page or 2M backing page */ + ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + dma_mem_alloc_done = true; + if (I40E_SD_TYPE_PAGED == type) { + ret_code = i40e_allocate_virt_mem(hw, + &sd_entry->u.pd_table.pd_entry_virt_mem, + sizeof(struct i40e_hmc_pd_entry) * 512); + if (ret_code) + goto exit; + sd_entry->u.pd_table.pd_entry = + (struct i40e_hmc_pd_entry *) + sd_entry->u.pd_table.pd_entry_virt_mem.va; + sd_entry->u.pd_table.pd_page_addr = mem; + } else { + sd_entry->u.bp.addr = mem; + sd_entry->u.bp.sd_pd_index = sd_index; + } + /* initialize the sd entry */ + hmc_info->sd_table.sd_entry[sd_index].entry_type = type; + + /* increment the ref count */ + I40E_INC_SD_REFCNT(&hmc_info->sd_table); + } + /* Increment backing page reference count */ + if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) + I40E_INC_BP_REFCNT(&sd_entry->u.bp); +exit: + if (ret_code) + if (dma_mem_alloc_done) + 
i40e_free_dma_mem(hw, &mem); + + return ret_code; +} + +/** + * i40e_add_pd_table_entry - Adds page descriptor to the specified table + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @pd_index: which page descriptor index to manipulate + * + * This function: + * 1. Initializes the pd entry + * 2. Adds pd_entry in the pd_table + * 3. Mark the entry valid in i40e_hmc_pd_entry structure + * 4. Initializes the pd_entry's ref count to 1 + * assumptions: + * 1. The memory for pd should be pinned down, physically contiguous and + * aligned on 4K boundary and zeroed memory. + * 2. It should be 4K in size. + **/ +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index) +{ + i40e_status ret_code = 0; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_dma_mem mem; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + u64 page_desc; + + if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n"); + goto exit; + } + + /* find corresponding sd */ + sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); + if (I40E_SD_TYPE_PAGED != + hmc_info->sd_table.sd_entry[sd_idx].entry_type) + goto exit; + + rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD); + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + if (!pd_entry->valid) { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + + pd_entry->bp.addr = mem; + pd_entry->bp.sd_pd_index = pd_index; + pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; + /* Set page address and valid bit */ + page_desc = mem.pa | 0x1; + + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + + /* Add the backing page physical address in the pd entry */ + memcpy(pd_addr, &page_desc, sizeof(u64)); + + pd_entry->sd_index = sd_idx; + pd_entry->valid = true; + I40E_INC_PD_REFCNT(pd_table); + } + I40E_INC_BP_REFCNT(&pd_entry->bp); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_bp - remove a backing page from a page descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: distinguishes a VF from a PF + * + * This function: + * 1. Marks the entry in pd tabe (for paged address mode) or in sd table + * (for direct address mode) invalid. + * 2. Write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for the pd _entry + * assumptions: + * 1. Caller can deallocate the memory used by backing storage after this + * function returns. 
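+ *
+ * Note that teardown is reference counted: the 4 KB backing page is freed
+ * only once its BP refcount drops to zero, and the software pd_entry array
+ * (pd_entry_virt_mem) is released only after the last page descriptor in the
+ * table has gone away, mirroring the refcounts taken when the entries were
+ * added.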
+ **/ +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_sd_entry *sd_entry; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + + /* calculate index */ + sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; + rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; + if (sd_idx >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n"); + goto exit; + } + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { + ret_code = I40E_ERR_INVALID_SD_TYPE; + hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n"); + goto exit; + } + /* get the entry and decrease its ref counter */ + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + I40E_DEC_BP_REFCNT(&pd_entry->bp); + if (pd_entry->bp.ref_cnt) + goto exit; + + /* mark the entry invalid */ + pd_entry->valid = false; + I40E_DEC_PD_REFCNT(pd_table); + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + memset(pd_addr, 0, sizeof(u64)); + I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); + + /* free memory here */ + ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (ret_code) + goto exit; + if (!pd_table->ref_cnt) + i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); +exit: + return ret_code; +} + +/** + * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + **/ +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_DEC_BP_REFCNT(&sd_entry->u.bp); + if (sd_entry->u.bp.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); + + /* mark the entry invalid */ + sd_entry->valid = false; +exit: + return ret_code; +} + +/** + * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: used to distinguish between VF and PF + **/ +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + struct i40e_hmc_sd_entry *sd_entry; + i40e_status ret_code = 0; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + if (is_pf) { + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); + } else { + ret_code = I40E_NOT_SUPPORTED; + goto exit; + } + ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); + if (ret_code) + goto exit; +exit: + return ret_code; +} + +/** + * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. 
+ * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + **/ +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + if (sd_entry->u.pd_table.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + + /* mark the entry invalid */ + sd_entry->valid = false; + + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_page_new - Removes a PD page from sd entry. + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * @is_pf: used to distinguish between VF and PF + **/ +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + if (is_pf) { + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); + } else { + ret_code = I40E_NOT_SUPPORTED; + goto exit; + } + /* free memory here */ + ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); + if (ret_code) + goto exit; +exit: + return ret_code; +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h new file mode 100644 index 000000000..732a02660 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -0,0 +1,236 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(upper_32_bits(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. 
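+ *
+ * As a small worked example (numbers chosen purely for illustration): with
+ * hmc_obj[type].base = 0, an object size of 128 bytes, idx = 1000 and
+ * cnt = 512, fpm_adr = 128000 and fpm_limit = 128000 + 512 * 128 = 193536,
+ * giving *pd_index = 128000 / 4096 = 31 and
+ * *pd_limit = (193536 - 1) / 4096 + 1 = 48, i.e. page descriptors 31..47
+ * back that range of objects.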
+ **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index); +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c new file mode 100644 index 000000000..0079ad7bc --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -0,0 +1,1143 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_type.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" +#include "i40e_prototype.h" + +/* lan specific interface functions */ + +/** + * i40e_align_l2obj_base - aligns base object pointer to 512 bytes + * @offset: base address offset needing alignment + * + * Aligns the layer 2 function private memory so it's 512-byte aligned. 
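+ *
+ * For example, an offset of 0x1234 (4660) is not a multiple of 512 and is
+ * rounded up to the next 512-byte boundary, 0x1400 (5120); offsets that are
+ * already aligned are returned unchanged.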
+ **/ +static u64 i40e_align_l2obj_base(u64 offset) +{ + u64 aligned_offset = offset; + + if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0) + aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT - + (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT)); + + return aligned_offset; +} + +/** + * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * Calculates the maximum amount of memory for the function required, based + * on the number of resources it must provide context for. + **/ +static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, + u32 fcoe_cntx_num, u32 fcoe_filt_num) +{ + u64 fpm_size = 0; + + fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ; + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT); + fpm_size = i40e_align_l2obj_base(fpm_size); + + return fpm_size; +} + +/** + * i40e_init_lan_hmc - initialize i40e_hmc_info struct + * @hw: pointer to the HW structure + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * This function will be called once per physical function initialization. + * It will fill out the i40e_hmc_obj_info structure for LAN objects based on + * the driver's provided input, as well as information from the HMC itself + * loaded from NVRAM. + * + * Assumptions: + * - HMC Resource Profile has been selected before calling this function. 
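+ *
+ * The per-object sizes are read from the GLHMC_*OBJSZ registers as
+ * power-of-two exponents, so the resulting l2fpm_size depends on the part;
+ * as a rough illustration, a 9 MB footprint would need
+ * sd_cnt = (9 MB + 2 MB - 1) / 2 MB = 5 direct-mode segment descriptors,
+ * matching the rounding against I40E_HMC_DIRECT_BP_SIZE done below.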
+ **/ +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num) +{ + struct i40e_hmc_obj_info *obj, *full_obj; + i40e_status ret_code = 0; + u64 l2fpm_size; + u32 size_exp; + + hw->hmc.signature = I40E_HMC_INFO_SIGNATURE; + hw->hmc.hmc_fn_id = hw->pf_id; + + /* allocate memory for hmc_obj */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem, + sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *) + hw->hmc.hmc_obj_virt_mem.va; + + /* The full object will be used to create the LAN HMC SD */ + full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL]; + full_obj->max_cnt = 0; + full_obj->cnt = 0; + full_obj->base = 0; + full_obj->size = 0; + + /* Tx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = txq_num; + obj->base = 0; + size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (txq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + txq_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* Rx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = rxq_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (rxq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + rxq_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX); + obj->cnt = fcoe_cntx_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_cntx_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_cntx_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE filter information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX); + obj->cnt = fcoe_filt_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base + + (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt * + hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); + 
obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_filt_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_filt_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + hw->hmc.first_sd_index = 0; + hw->hmc.sd_table.ref_cnt = 0; + l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num, + fcoe_filt_num); + if (NULL == hw->hmc.sd_table.sd_entry) { + hw->hmc.sd_table.sd_cnt = (u32) + (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) / + I40E_HMC_DIRECT_BP_SIZE; + + /* allocate the sd_entry members in the sd_table */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr, + (sizeof(struct i40e_hmc_sd_entry) * + hw->hmc.sd_table.sd_cnt)); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.sd_table.sd_entry = + (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; + } + /* store in the LAN full object for later */ + full_obj->size = l2fpm_size; + +init_lan_hmc_out: + return ret_code; +} + +/** + * i40e_remove_pd_page - Remove a page from the page descriptor table + * @hw: pointer to the HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * + * This function: + * 1. Marks the entry in pd table (for paged address mode) invalid + * 2. write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for pd_entry + * assumptions: + * 1. caller can deallocate the memory used by pd after this function + * returns. + **/ +static i40e_status i40e_remove_pd_page(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + + if (!i40e_prep_remove_pd_page(hmc_info, idx)) + ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_remove_sd_bp - remove a backing page from a segment descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * + * This function: + * 1. Marks the entry in sd table (for direct address mode) invalid + * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set + * to 0) and PMSDDATAHIGH to invalidate the sd page + * 3. Decrement the ref count for the sd_entry + * assumptions: + * 1. caller can deallocate the memory used by backing storage after this + * function returns. + **/ +static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + + if (!i40e_prep_remove_sd_bp(hmc_info, idx)) + ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_create_lan_hmc_object - allocate backing store for hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. 
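+ *
+ * In direct mode each segment descriptor is backed by a single 2 MB DMA
+ * buffer (I40E_HMC_DIRECT_BP_SIZE); in paged mode the SD instead points at a
+ * 4 KB page of up to 512 page descriptors (I40E_HMC_PD_CNT_IN_SD), each
+ * referencing its own 4 KB backing page (I40E_HMC_PAGED_BP_SIZE), so either
+ * way one SD covers the same 2 MB of FPM address space.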
+ **/ +static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + u32 pd_idx1 = 0, pd_lmt1 = 0; + u32 pd_idx = 0, pd_lmt = 0; + bool pd_error = false; + u32 sd_idx, sd_lmt; + u64 sd_size; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n"); + goto exit; + } + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + /* find pd index */ + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + /* This is to cover for cases where you may not want to have an SD with + * the full 2M memory but something smaller. By not filling out any + * size, the function will default the SD size to be 2M. + */ + if (info->direct_mode_sz == 0) + sd_size = I40E_HMC_DIRECT_BP_SIZE; + else + sd_size = info->direct_mode_sz; + + /* check if all the sds are valid. If not, allocate a page and + * initialize it. + */ + for (j = sd_idx; j < sd_lmt; j++) { + /* update the sd table entry */ + ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, + info->entry_type, + sd_size); + if (ret_code) + goto exit_sd_error; + sd_entry = &info->hmc_info->sd_table.sd_entry[j]; + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + /* check if all the pds in this sd are valid. If not, + * allocate a page and initialize it. 
+ */ + + /* find pd_idx and pd_lmt in this sd */ + pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, + ((j + 1) * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) { + /* update the pd table entry */ + ret_code = i40e_add_pd_table_entry(hw, + info->hmc_info, + i); + if (ret_code) { + pd_error = true; + break; + } + } + if (pd_error) { + /* remove the backing pages from pd_idx1 to i */ + while (i && (i > pd_idx1)) { + i40e_remove_pd_bp(hw, info->hmc_info, + (i - 1)); + i--; + } + } + } + if (!sd_entry->valid) { + sd_entry->valid = true; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + I40E_SET_PF_SD_ENTRY(hw, + sd_entry->u.pd_table.pd_page_addr.pa, + j, sd_entry->entry_type); + break; + case I40E_SD_TYPE_DIRECT: + I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, + j, sd_entry->entry_type); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + goto exit; + } + } + } + goto exit; + +exit_sd_error: + /* cleanup for sd entries from j to sd_idx */ + while (j && (j > sd_idx)) { + sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + pd_idx1 = max(pd_idx, + ((j - 1) * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) { + i40e_remove_pd_bp(hw, info->hmc_info, i); + } + i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); + break; + case I40E_SD_TYPE_DIRECT: + i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + break; + } + j--; + } +exit: + return ret_code; +} + +/** + * i40e_configure_lan_hmc - prepare the HMC backing store + * @hw: pointer to the hw structure + * @model: the model for the layout of the SD/PD tables + * + * - This function will be called once per physical function initialization. + * - This function will be called after i40e_init_lan_hmc() and before + * any LAN/FCoE HMC objects can be created. 
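+ *
+ * A rough bring-up/teardown ordering, with placeholder counts (the real
+ * values come from the capabilities discovered on the device):
+ *
+ *   i40e_init_lan_hmc(hw, num_txq, num_rxq, num_fcoe_cntx, num_fcoe_filt);
+ *   i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
+ *   ... create and use the LAN/FCoE HMC objects ...
+ *   i40e_shutdown_lan_hmc(hw);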
+ **/ +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model) +{ + struct i40e_hmc_lan_create_obj_info info; + i40e_status ret_code = 0; + u8 hmc_fn_id = hw->hmc.hmc_fn_id; + struct i40e_hmc_obj_info *obj; + + /* Initialize part of the create object info struct */ + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size; + + /* Build the SD entry for the LAN objects */ + switch (model) { + case I40E_HMC_MODEL_DIRECT_PREFERRED: + case I40E_HMC_MODEL_DIRECT_ONLY: + info.entry_type = I40E_SD_TYPE_DIRECT; + /* Make one big object, a single SD */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + goto try_type_paged; + else if (ret_code) + goto configure_lan_hmc_out; + /* else clause falls through the break */ + break; + case I40E_HMC_MODEL_PAGED_ONLY: +try_type_paged: + info.entry_type = I40E_SD_TYPE_PAGED; + /* Make one big object in the PD table */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if (ret_code) + goto configure_lan_hmc_out; + break; + default: + /* unsupported type */ + ret_code = I40E_ERR_INVALID_SD_TYPE; + hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n", + ret_code); + goto configure_lan_hmc_out; + } + + /* Configure and program the FPM registers so objects can be created */ + + /* Tx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt); + + /* Rx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt); + + /* FCoE contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt); + + /* FCoE filters */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt); + +configure_lan_hmc_out: + return ret_code; +} + +/** + * i40e_delete_hmc_object - remove hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_delete_obj_info struct + * + * This will de-populate the SDs and PDs. It frees + * the memory for PDS and backing storage. After this function is returned, + * caller should deallocate memory allocated previously for + * book-keeping information about PDs and backing storage. 
+ **/ +static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info) +{ + i40e_status ret_code = 0; + struct i40e_hmc_pd_table *pd_table; + u32 pd_idx, pd_lmt, rel_pd_idx; + u32 sd_idx, sd_lmt; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n"); + goto exit; + } + + if (NULL == info->hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n"); + goto exit; + } + + if (NULL == info->hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); + goto exit; + } + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + for (j = pd_idx; j < pd_lmt; j++) { + sd_idx = j / I40E_HMC_PD_CNT_IN_SD; + + if (I40E_SD_TYPE_PAGED != + info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) + continue; + + rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; + + pd_table = + &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + if (pd_table->pd_entry[rel_pd_idx].valid) { + ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); + if (ret_code) + goto exit; + } + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + + for (i = sd_idx; i < sd_lmt; i++) { + if (!info->hmc_info->sd_table.sd_entry[i].valid) + continue; + switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { + case I40E_SD_TYPE_DIRECT: + ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); + if (ret_code) + goto exit; + break; + case I40E_SD_TYPE_PAGED: + ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); + if (ret_code) + goto exit; + break; + default: + break; + } + } +exit: + return ret_code; +} + +/** + * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory + * @hw: pointer to the hw structure + * + * This must be called by drivers as they are shutting down and being + * removed from the OS. 
+ **/ +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw) +{ + struct i40e_hmc_lan_delete_obj_info info; + i40e_status ret_code; + + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.count = 1; + + /* delete the object */ + ret_code = i40e_delete_lan_hmc_object(hw, &info); + + /* free the SD table entry for LAN */ + i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr); + hw->hmc.sd_table.sd_cnt = 0; + hw->hmc.sd_table.sd_entry = NULL; + + /* free memory used for hmc_obj */ + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + hw->hmc.hmc_obj = NULL; + + return ret_code; +} + +#define I40E_HMC_STORE(_struct, _ele) \ + offsetof(struct _struct, _ele), \ + FIELD_SIZEOF(struct _struct, _ele) + +struct i40e_context_ele { + u16 offset; + u16 size_of; + u16 width; + u16 lsb; +}; + +/* LAN Tx Queue Context */ +static struct i40e_context_ele i40e_hmc_txq_ce_info[] = { + /* Field Width LSB */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 }, +/* line 1 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 }, +/* line 7 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) }, + { 0 } +}; + +/* LAN Rx Queue Context */ +static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = { + /* Field Width LSB */ + { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 }, + { 0 } +}; + +/** + * i40e_write_byte - replace HMC context byte + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * 
@src: the struct to be read from + **/ +static void i40e_write_byte(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u8 src_byte, dest_byte, mask; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = ((u8)1 << ce_info->width) - 1; + + src_byte = *from; + src_byte &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_byte <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_byte, dest, sizeof(dest_byte)); + + dest_byte &= ~mask; /* get the bits not changing */ + dest_byte |= src_byte; /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_byte, sizeof(dest_byte)); +} + +/** + * i40e_write_word - replace HMC context word + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_word(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u16 src_word, mask; + u8 *from, *dest; + u16 shift_width; + __le16 dest_word; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = ((u16)1 << ce_info->width) - 1; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_word = *(u16 *)from; + src_word &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_word <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_word, dest, sizeof(dest_word)); + + dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ + dest_word |= cpu_to_le16(src_word); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_word, sizeof(dest_word)); +} + +/** + * i40e_write_dword - replace HMC context dword + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_dword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u32 src_dword, mask; + u8 *from, *dest; + u16 shift_width; + __le32 dest_dword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = ((u32)1 << ce_info->width) - 1; + else + mask = ~(u32)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_dword = *(u32 *)from; + src_dword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_dword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_dword, dest, sizeof(dest_dword)); + + dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ + dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_dword, sizeof(dest_dword)); +} + +/** + * i40e_write_qword - replace HMC context qword + * 
@hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_qword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u64 src_qword, mask; + u8 *from, *dest; + u16 shift_width; + __le64 dest_qword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = ((u64)1 << ce_info->width) - 1; + else + mask = ~(u64)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_qword = *(u64 *)from; + src_qword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_qword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_qword, dest, sizeof(dest_qword)); + + dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ + dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_qword, sizeof(dest_qword)); +} + +/** + * i40e_clear_hmc_context - zero out the HMC context bits + * @hw: the hardware struct + * @context_bytes: pointer to the context bit array (DMA memory) + * @hmc_type: the type of HMC resource + **/ +static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw, + u8 *context_bytes, + enum i40e_hmc_lan_rsrc_type hmc_type) +{ + /* clean the bit array */ + memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size); + + return 0; +} + +/** + * i40e_set_hmc_context - replace HMC context bits + * @context_bytes: pointer to the context bit array + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static i40e_status i40e_set_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + int f; + + for (f = 0; ce_info[f].width != 0; f++) { + + /* we have to deal with each element of the HMC using the + * correct size so that we are correct regardless of the + * endianness of the machine + */ + switch (ce_info[f].size_of) { + case 1: + i40e_write_byte(context_bytes, &ce_info[f], dest); + break; + case 2: + i40e_write_word(context_bytes, &ce_info[f], dest); + break; + case 4: + i40e_write_dword(context_bytes, &ce_info[f], dest); + break; + case 8: + i40e_write_qword(context_bytes, &ce_info[f], dest); + break; + } + } + + return 0; +} + +/** + * i40e_hmc_get_object_va - retrieves an object's virtual address + * @hmc_info: pointer to i40e_hmc_info struct + * @object_base: pointer to u64 to get the va + * @rsrc_type: the hmc resource type + * @obj_idx: hmc object index + * + * This function retrieves the object's virtual address from the object + * base pointer. This function is used for LAN Queue contexts. 
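+ *
+ * As a worked example, assuming the default 128-byte Tx queue context
+ * (I40E_HMC_OBJ_SIZE_TXQ): queue 10 begins at its object base plus
+ * 10 * 128 bytes within the FPM.  The upper bits of that offset select
+ * the SD (and, for paged SDs, the PD), and the remainder is the byte
+ * offset into the direct or paged backing page computed below.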
+ **/ +static +i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info, + u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) +{ + u32 obj_offset_in_sd, obj_offset_in_pd; + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + struct i40e_hmc_pd_entry *pd_entry; + u32 pd_idx, pd_lmt, rel_pd_idx; + u64 obj_offset_in_fpm; + u32 sd_idx, sd_lmt; + + if (NULL == hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n"); + goto exit; + } + if (NULL == hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); + goto exit; + } + if (NULL == object_base) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n"); + goto exit; + } + if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { + hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n", + ret_code); + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + goto exit; + } + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &sd_idx, &sd_lmt); + + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base + + hmc_info->hmc_obj[rsrc_type].size * obj_idx; + + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &pd_idx, &pd_lmt); + rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD; + pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx]; + obj_offset_in_pd = (u32)(obj_offset_in_fpm % + I40E_HMC_PAGED_BP_SIZE); + *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd; + } else { + obj_offset_in_sd = (u32)(obj_offset_in_fpm % + I40E_HMC_DIRECT_BP_SIZE); + *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd; + } +exit: + return ret_code; +} + +/** + * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX); +} + +/** + * i40e_set_lan_tx_queue_context - set the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_txq_ce_info, (u8 *)s); +} + +/** + * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX); +} + +/** + * i40e_set_lan_rx_queue_context - set the HMC context for the queue + * @hw: the 
hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_rxq_ce_info, (u8 *)s); +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h new file mode 100644 index 000000000..e74128db5 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -0,0 +1,181 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct i40e_hmc_obj_rxq { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u16 dbuff; /* bigger than needed, see above for reason */ +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u16 hbuff; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +/* Tx queue context data +* +* The sizes of the variables may be larger than needed due to crossing byte +* boundaries. If we do not have the width of the variable set to the correct +* size then we could end up shifting bits off the top of the variable when the +* variable is at the top of a byte and crosses over into the next byte. 
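+*
+* For example, qlen is only 13 bits wide in the packed Tx queue context,
+* but it is declared u16 here so that the mask-and-shift in
+* i40e_write_word() still has room for the value after it is shifted to
+* its bit position within the context.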
+*/ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u8 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; +}; + +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); + +#endif /* _I40E_LAN_HMC_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c new file mode 100644 index 000000000..5b5bea159 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -0,0 +1,10368 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* Local includes */ +#include "i40e.h" +#include "i40e_diag.h" +#ifdef CONFIG_I40E_VXLAN +#include +#endif + +const char i40e_driver_name[] = "i40e"; +static const char i40e_driver_string[] = + "Intel(R) Ethernet Connection XL710 Network Driver"; + +#define DRV_KERN "-k" + +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 3 +#define DRV_VERSION_BUILD 2 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) DRV_KERN +const char i40e_driver_version_str[] = DRV_VERSION; +static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; + +/* a bit of forward declarations */ +static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); +static void i40e_handle_reset_warning(struct i40e_pf *pf); +static int i40e_add_vsi(struct i40e_vsi *vsi); +static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); +static int i40e_setup_misc_vector(struct i40e_pf *pf); +static void i40e_determine_queue_usage(struct i40e_pf *pf); +static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fdir_sb_setup(struct i40e_pf *pf); +static int i40e_veb_get_bw_info(struct i40e_veb *veb); + +/* i40e_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id i40e_pci_tbl[] = { + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); + +#define I40E_MAX_VF_COUNT 128 +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/** + * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, + u64 size, u32 alignment) +{ + struct i40e_pf *pf = (struct i40e_pf *)hw->back; + + mem->size = ALIGN(size, alignment); + mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * i40e_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to 
free + **/ +int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +{ + struct i40e_pf *pf = (struct i40e_pf *)hw->back; + + dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); + mem->va = NULL; + mem->pa = 0; + mem->size = 0; + + return 0; +} + +/** + * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, + u32 size) +{ + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * i40e_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) +{ + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + mem->va = NULL; + mem->size = 0; + + return 0; +} + +/** + * i40e_get_lump - find a lump of free generic resource + * @pf: board private structure + * @pile: the pile of resource to search + * @needed: the number of items needed + * @id: an owner id to stick on the items assigned + * + * Returns the base item index of the lump, or negative for error + * + * The search_hint trick and lack of advanced fit-finding only work + * because we're highly likely to have all the same size lump requests. + * Linear search time and any fragmentation should be minimal. + **/ +static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, + u16 needed, u16 id) +{ + int ret = -ENOMEM; + int i, j; + + if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { + dev_info(&pf->pdev->dev, + "param err: pile=%p needed=%d id=0x%04x\n", + pile, needed, id); + return -EINVAL; + } + + /* start the linear search with an imperfect hint */ + i = pile->search_hint; + while (i < pile->num_entries) { + /* skip already allocated entries */ + if (pile->list[i] & I40E_PILE_VALID_BIT) { + i++; + continue; + } + + /* do we have enough in this lump? 
*/ + for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { + if (pile->list[i+j] & I40E_PILE_VALID_BIT) + break; + } + + if (j == needed) { + /* there was enough, so assign it to the requestor */ + for (j = 0; j < needed; j++) + pile->list[i+j] = id | I40E_PILE_VALID_BIT; + ret = i; + pile->search_hint = i + j; + break; + } else { + /* not enough, so skip over it and continue looking */ + i += j; + } + } + + return ret; +} + +/** + * i40e_put_lump - return a lump of generic resource + * @pile: the pile of resource to search + * @index: the base item index + * @id: the owner id of the items assigned + * + * Returns the count of items in the lump + **/ +static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) +{ + int valid_id = (id | I40E_PILE_VALID_BIT); + int count = 0; + int i; + + if (!pile || index >= pile->num_entries) + return -EINVAL; + + for (i = index; + i < pile->num_entries && pile->list[i] == valid_id; + i++) { + pile->list[i] = 0; + count++; + } + + if (count && index < pile->search_hint) + pile->search_hint = index; + + return count; +} + +/** + * i40e_find_vsi_from_id - searches for the vsi with the given id + * @pf - the pf structure to search for the vsi + * @id - id of the vsi it is searching for + **/ +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->id == id)) + return pf->vsi[i]; + + return NULL; +} + +/** + * i40e_service_event_schedule - Schedule the service task to wake up + * @pf: board private structure + * + * If not already scheduled, this puts the task into the work queue + **/ +static void i40e_service_event_schedule(struct i40e_pf *pf) +{ + if (!test_bit(__I40E_DOWN, &pf->state) && + !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && + !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) + schedule_work(&pf->service_task); +} + +/** + * i40e_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * + * If any port has noticed a Tx timeout, it is likely that the whole + * device is munged, not just the one netdev port, so go for the full + * reset. 
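+ *
+ * Recovery escalates on successive timeouts: level 0 re-initializes the
+ * VSI's queues, level 1 requests a PF reset, level 2 a core reset,
+ * level 3 a global reset, and anything beyond that gives up and asks
+ * for the interface to be brought down.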
+ **/ +#ifdef I40E_FCOE +void i40e_tx_timeout(struct net_device *netdev) +#else +static void i40e_tx_timeout(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + pf->tx_timeout_count++; + + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) + pf->tx_timeout_recovery_level = 1; + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d\n", + pf->tx_timeout_recovery_level); + + switch (pf->tx_timeout_recovery_level) { + case 0: + /* disable and re-enable queues for the VSI */ + if (in_interrupt()) { + set_bit(__I40E_REINIT_REQUESTED, &pf->state); + set_bit(__I40E_REINIT_REQUESTED, &vsi->state); + } else { + i40e_vsi_reinit_locked(vsi); + } + break; + case 1: + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + break; + case 2: + set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + break; + case 3: + set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); + set_bit(__I40E_DOWN_REQUESTED, &pf->state); + set_bit(__I40E_DOWN_REQUESTED, &vsi->state); + break; + } + i40e_service_event_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40e_get_vsi_stats_struct - Get System Network Statistics + * @vsi: the VSI we care about + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. + **/ +struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) +{ + return &vsi->net_stats; +} + +/** + * i40e_get_netdev_stats_struct - Get statistics for netdev interface + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. 
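+ *
+ * Per-queue packet and byte counts are snapshotted under each ring's
+ * u64_stats syncp so the 64-bit counters read consistently on 32-bit
+ * machines; note that an Rx ring sits directly after its Tx partner in
+ * the same allocation (rx_ring = &tx_ring[1]).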
+ **/ +#ifdef I40E_FCOE +struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; + struct i40e_vsi *vsi = np->vsi; + struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + int i; + + if (test_bit(__I40E_DOWN, &vsi->state)) + return stats; + + if (!vsi->tx_rings) + return stats; + + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { + u64 bytes, packets; + unsigned int start; + + tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) + continue; + + do { + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; + rx_ring = &tx_ring[1]; + + do { + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + rcu_read_unlock(); + + /* following stats updated by i40e_watchdog_subtask() */ + stats->multicast = vsi_stats->multicast; + stats->tx_errors = vsi_stats->tx_errors; + stats->tx_dropped = vsi_stats->tx_dropped; + stats->rx_errors = vsi_stats->rx_errors; + stats->rx_crc_errors = vsi_stats->rx_crc_errors; + stats->rx_length_errors = vsi_stats->rx_length_errors; + + return stats; +} + +/** + * i40e_vsi_reset_stats - Resets all stats of the given vsi + * @vsi: the VSI to have its stats reset + **/ +void i40e_vsi_reset_stats(struct i40e_vsi *vsi) +{ + struct rtnl_link_stats64 *ns; + int i; + + if (!vsi) + return; + + ns = i40e_get_vsi_stats_struct(vsi); + memset(ns, 0, sizeof(*ns)); + memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); + memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); + memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); + if (vsi->rx_rings && vsi->rx_rings[0]) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + memset(&vsi->rx_rings[i]->stats, 0 , + sizeof(vsi->rx_rings[i]->stats)); + memset(&vsi->rx_rings[i]->rx_stats, 0 , + sizeof(vsi->rx_rings[i]->rx_stats)); + memset(&vsi->tx_rings[i]->stats, 0 , + sizeof(vsi->tx_rings[i]->stats)); + memset(&vsi->tx_rings[i]->tx_stats, 0, + sizeof(vsi->tx_rings[i]->tx_stats)); + } + } + vsi->stat_offsets_loaded = false; +} + +/** + * i40e_pf_reset_stats - Reset all of the stats for the given PF + * @pf: the PF to be reset + **/ +void i40e_pf_reset_stats(struct i40e_pf *pf) +{ + int i; + + memset(&pf->stats, 0, sizeof(pf->stats)); + memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); + pf->stat_offsets_loaded = false; + + for (i = 0; i < I40E_MAX_VEB; i++) { + if (pf->veb[i]) { + memset(&pf->veb[i]->stats, 0, + sizeof(pf->veb[i]->stats)); + memset(&pf->veb[i]->stats_offsets, 0, + sizeof(pf->veb[i]->stats_offsets)); + pf->veb[i]->stat_offsets_loaded = false; + } + } +} + +/** + * i40e_stat_update48 - read and update a 48 bit stat from the chip + * @hw: ptr to the hardware info + * @hireg: the high 32 bit reg to read + * @loreg: the low 32 bit reg to read + * @offset_loaded: has the initial offset been loaded yet + * @offset: ptr to current offset value + * @stat: ptr to the stat + * + * Since the 
device stats are not reset at PFReset, they likely will not + * be zeroed when the driver starts. We'll save the first values read + * and use them as offsets to be subtracted from the raw values in order + * to report stats that count from zero. In the process, we also manage + * the potential roll-over. + **/ +static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, + bool offset_loaded, u64 *offset, u64 *stat) +{ + u64 new_data; + + if (hw->device_id == I40E_DEV_ID_QEMU) { + new_data = rd32(hw, loreg); + new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + } else { + new_data = rd64(hw, loreg); + } + if (!offset_loaded) + *offset = new_data; + if (likely(new_data >= *offset)) + *stat = new_data - *offset; + else + *stat = (new_data + ((u64)1 << 48)) - *offset; + *stat &= 0xFFFFFFFFFFFFULL; +} + +/** + * i40e_stat_update32 - read and update a 32 bit stat from the chip + * @hw: ptr to the hardware info + * @reg: the hw reg to read + * @offset_loaded: has the initial offset been loaded yet + * @offset: ptr to current offset value + * @stat: ptr to the stat + **/ +static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, + bool offset_loaded, u64 *offset, u64 *stat) +{ + u32 new_data; + + new_data = rd32(hw, reg); + if (!offset_loaded) + *offset = new_data; + if (likely(new_data >= *offset)) + *stat = (u32)(new_data - *offset); + else + *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); +} + +/** + * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. + * @vsi: the VSI to be updated + **/ +void i40e_update_eth_stats(struct i40e_vsi *vsi) +{ + int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_eth_stats *oes; + struct i40e_eth_stats *es; /* device's eth stats */ + + es = &vsi->eth_stats; + oes = &vsi->eth_stats_offsets; + + /* Gather up the stats that the hw collects */ + i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_errors, &es->tx_errors); + i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_discards, &es->rx_discards); + i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_unknown_protocol, &es->rx_unknown_protocol); + i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_errors, &es->tx_errors); + + i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), + I40E_GLV_GORCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_bytes, &es->rx_bytes); + i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), + I40E_GLV_UPRCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_unicast, &es->rx_unicast); + i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), + I40E_GLV_MPRCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_multicast, &es->rx_multicast); + i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), + I40E_GLV_BPRCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_broadcast, &es->rx_broadcast); + + i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), + I40E_GLV_GOTCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_bytes, &es->tx_bytes); + i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), + I40E_GLV_UPTCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_unicast, &es->tx_unicast); + i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), + I40E_GLV_MPTCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_multicast, &es->tx_multicast); + i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), + I40E_GLV_BPTCL(stat_idx), + vsi->stat_offsets_loaded, + 
&oes->tx_broadcast, &es->tx_broadcast); + vsi->stat_offsets_loaded = true; +} + +/** + * i40e_update_veb_stats - Update Switch component statistics + * @veb: the VEB being updated + **/ +static void i40e_update_veb_stats(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_eth_stats *oes; + struct i40e_eth_stats *es; /* device's eth stats */ + int idx = 0; + + idx = veb->stats_idx; + es = &veb->stats; + oes = &veb->stats_offsets; + + /* Gather up the stats that the hw collects */ + i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), + veb->stat_offsets_loaded, + &oes->tx_discards, &es->tx_discards); + if (hw->revision_id > 0) + i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), + veb->stat_offsets_loaded, + &oes->rx_unknown_protocol, + &es->rx_unknown_protocol); + i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), + veb->stat_offsets_loaded, + &oes->rx_bytes, &es->rx_bytes); + i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx), + veb->stat_offsets_loaded, + &oes->rx_unicast, &es->rx_unicast); + i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx), + veb->stat_offsets_loaded, + &oes->rx_multicast, &es->rx_multicast); + i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx), + veb->stat_offsets_loaded, + &oes->rx_broadcast, &es->rx_broadcast); + + i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_bytes, &es->tx_bytes); + i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_unicast, &es->tx_unicast); + i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_multicast, &es->tx_multicast); + i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_broadcast, &es->tx_broadcast); + veb->stat_offsets_loaded = true; +} + +#ifdef I40E_FCOE +/** + * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters. 
+ * @vsi: the VSI that is capable of doing FCoE + **/ +static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_fcoe_stats *ofs; + struct i40e_fcoe_stats *fs; /* device's eth stats */ + int idx; + + if (vsi->type != I40E_VSI_FCOE) + return; + + idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; + fs = &vsi->fcoe_stats; + ofs = &vsi->fcoe_stats_offsets; + + i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets); + i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords); + i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped); + i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets); + i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords); + i40e_stat_update32(hw, I40E_GL_FCOECRC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc); + i40e_stat_update32(hw, I40E_GL_FCOELAST(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->fcoe_last_error, &fs->fcoe_last_error); + i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count); + + vsi->fcoe_stat_offsets_loaded = true; +} + +#endif +/** + * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode + * @pf: the corresponding PF + * + * Update the Rx XOFF counter (PAUSE frames) in link flow control mode + **/ +static void i40e_update_link_xoff_rx(struct i40e_pf *pf) +{ + struct i40e_hw_port_stats *osd = &pf->stats_offsets; + struct i40e_hw_port_stats *nsd = &pf->stats; + struct i40e_hw *hw = &pf->hw; + u64 xoff = 0; + u16 i, v; + + if ((hw->fc.current_mode != I40E_FC_FULL) && + (hw->fc.current_mode != I40E_FC_RX_PAUSE)) + return; + + xoff = nsd->link_xoff_rx; + i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_rx, &nsd->link_xoff_rx); + + /* No new LFC xoff rx */ + if (!(nsd->link_xoff_rx - xoff)) + return; + + /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + + if (!vsi || !vsi->tx_rings[0]) + continue; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *ring = vsi->tx_rings[i]; + clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); + } + } +} + +/** + * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode + * @pf: the corresponding PF + * + * Update the Rx XOFF counter (PAUSE frames) in PFC mode + **/ +static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) +{ + struct i40e_hw_port_stats *osd = &pf->stats_offsets; + struct i40e_hw_port_stats *nsd = &pf->stats; + bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; + struct i40e_dcbx_config *dcb_cfg; + struct i40e_hw *hw = &pf->hw; + u16 i, v; + u8 tc; + + dcb_cfg = &hw->local_dcbx_config; + + /* See if DCB enabled with PFC TC */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || + !(dcb_cfg->pfc.pfcenable)) { + i40e_update_link_xoff_rx(pf); + return; + } + + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + u64 prio_xoff = nsd->priority_xoff_rx[i]; + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->stat_offsets_loaded, + 
&osd->priority_xoff_rx[i], + &nsd->priority_xoff_rx[i]); + + /* No new PFC xoff rx */ + if (!(nsd->priority_xoff_rx[i] - prio_xoff)) + continue; + /* Get the TC for given priority */ + tc = dcb_cfg->etscfg.prioritytable[i]; + xoff[tc] = true; + } + + /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + + if (!vsi || !vsi->tx_rings[0]) + continue; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *ring = vsi->tx_rings[i]; + + tc = ring->dcb_tc; + if (xoff[tc]) + clear_bit(__I40E_HANG_CHECK_ARMED, + &ring->state); + } + } +} + +/** + * i40e_update_vsi_stats - Update the vsi statistics counters. + * @vsi: the VSI to be updated + * + * There are a few instances where we store the same stat in a + * couple of different structs. This is partly because we have + * the netdev stats that need to be filled out, which is slightly + * different from the "eth_stats" defined by the chip and used in + * VF communications. We sort it out here. + **/ +static void i40e_update_vsi_stats(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct rtnl_link_stats64 *ons; + struct rtnl_link_stats64 *ns; /* netdev stats */ + struct i40e_eth_stats *oes; + struct i40e_eth_stats *es; /* device's eth stats */ + u32 tx_restart, tx_busy; + struct i40e_ring *p; + u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u16 q; + + if (test_bit(__I40E_DOWN, &vsi->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + ns = i40e_get_vsi_stats_struct(vsi); + ons = &vsi->net_stats_offsets; + es = &vsi->eth_stats; + oes = &vsi->eth_stats_offsets; + + /* Gather up the netdev and vsi stats that the driver collects + * on the fly during packet processing + */ + rx_b = rx_p = 0; + tx_b = tx_p = 0; + tx_restart = tx_busy = 0; + rx_page = 0; + rx_buf = 0; + rcu_read_lock(); + for (q = 0; q < vsi->num_queue_pairs; q++) { + /* locate Tx ring */ + p = ACCESS_ONCE(vsi->tx_rings[q]); + + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + tx_b += bytes; + tx_p += packets; + tx_restart += p->tx_stats.restart_queue; + tx_busy += p->tx_stats.tx_busy; + + /* Rx queue is part of the same block as Tx queue */ + p = &p[1]; + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + rx_b += bytes; + rx_p += packets; + rx_buf += p->rx_stats.alloc_buff_failed; + rx_page += p->rx_stats.alloc_page_failed; + } + rcu_read_unlock(); + vsi->tx_restart = tx_restart; + vsi->tx_busy = tx_busy; + vsi->rx_page_failed = rx_page; + vsi->rx_buf_failed = rx_buf; + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + + /* update netdev stats from eth stats */ + i40e_update_eth_stats(vsi); + ons->tx_errors = oes->tx_errors; + ns->tx_errors = es->tx_errors; + ons->multicast = oes->rx_multicast; + ns->multicast = es->rx_multicast; + ons->rx_dropped = oes->rx_discards; + ns->rx_dropped = es->rx_discards; + ons->tx_dropped = oes->tx_discards; + ns->tx_dropped = es->tx_discards; + + /* pull in a couple PF stats if this is the main vsi */ + if (vsi == pf->vsi[pf->lan_vsi]) { + ns->rx_crc_errors = pf->stats.crc_errors; + ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; + ns->rx_length_errors = pf->stats.rx_length_errors; 
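+ /* crc_errors, illegal_bytes and rx_length_errors above are
+ * port-wide counters kept in the PF stats, so only the main
+ * LAN VSI reports them.
+ */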
+ } +} + +/** + * i40e_update_pf_stats - Update the PF statistics counters. + * @pf: the PF to be updated + **/ +static void i40e_update_pf_stats(struct i40e_pf *pf) +{ + struct i40e_hw_port_stats *osd = &pf->stats_offsets; + struct i40e_hw_port_stats *nsd = &pf->stats; + struct i40e_hw *hw = &pf->hw; + u32 val; + int i; + + i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), + I40E_GLPRT_GORCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_bytes, &nsd->eth.rx_bytes); + i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), + I40E_GLPRT_GOTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_bytes, &nsd->eth.tx_bytes); + i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_discards, + &nsd->eth.rx_discards); + i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), + I40E_GLPRT_UPRCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_unicast, + &nsd->eth.rx_unicast); + i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), + I40E_GLPRT_MPRCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_multicast, + &nsd->eth.rx_multicast); + i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), + I40E_GLPRT_BPRCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_broadcast, + &nsd->eth.rx_broadcast); + i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), + I40E_GLPRT_UPTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_unicast, + &nsd->eth.tx_unicast); + i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), + I40E_GLPRT_MPTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_multicast, + &nsd->eth.tx_multicast); + i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), + I40E_GLPRT_BPTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_broadcast, + &nsd->eth.tx_broadcast); + + i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), + pf->stat_offsets_loaded, + &osd->tx_dropped_link_down, + &nsd->tx_dropped_link_down); + + i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), + pf->stat_offsets_loaded, + &osd->crc_errors, &nsd->crc_errors); + + i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), + pf->stat_offsets_loaded, + &osd->illegal_bytes, &nsd->illegal_bytes); + + i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), + pf->stat_offsets_loaded, + &osd->mac_local_faults, + &nsd->mac_local_faults); + i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), + pf->stat_offsets_loaded, + &osd->mac_remote_faults, + &nsd->mac_remote_faults); + + i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_length_errors, + &nsd->rx_length_errors); + + i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xon_rx, &nsd->link_xon_rx); + i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xon_tx, &nsd->link_xon_tx); + i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ + i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_tx, &nsd->link_xoff_tx); + + for (i = 0; i < 8; i++) { + i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xon_rx[i], + &nsd->priority_xon_rx[i]); + i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xon_tx[i], + &nsd->priority_xon_tx[i]); + i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xoff_tx[i], + &nsd->priority_xoff_tx[i]); + i40e_stat_update32(hw, + I40E_GLPRT_RXON2OFFCNT(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xon_2_xoff[i], + 
&nsd->priority_xon_2_xoff[i]); + } + + i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), + I40E_GLPRT_PRC64L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_64, &nsd->rx_size_64); + i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), + I40E_GLPRT_PRC127L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_127, &nsd->rx_size_127); + i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), + I40E_GLPRT_PRC255L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_255, &nsd->rx_size_255); + i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), + I40E_GLPRT_PRC511L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_511, &nsd->rx_size_511); + i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), + I40E_GLPRT_PRC1023L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_1023, &nsd->rx_size_1023); + i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), + I40E_GLPRT_PRC1522L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_1522, &nsd->rx_size_1522); + i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), + I40E_GLPRT_PRC9522L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_big, &nsd->rx_size_big); + + i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), + I40E_GLPRT_PTC64L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_64, &nsd->tx_size_64); + i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), + I40E_GLPRT_PTC127L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_127, &nsd->tx_size_127); + i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), + I40E_GLPRT_PTC255L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_255, &nsd->tx_size_255); + i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), + I40E_GLPRT_PTC511L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_511, &nsd->tx_size_511); + i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), + I40E_GLPRT_PTC1023L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_1023, &nsd->tx_size_1023); + i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), + I40E_GLPRT_PTC1522L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_1522, &nsd->tx_size_1522); + i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), + I40E_GLPRT_PTC9522L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_big, &nsd->tx_size_big); + + i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_undersize, &nsd->rx_undersize); + i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_fragments, &nsd->rx_fragments); + i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_oversize, &nsd->rx_oversize); + i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_jabber, &nsd->rx_jabber); + + /* FDIR stats */ + i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), + pf->stat_offsets_loaded, + &osd->fd_atr_match, &nsd->fd_atr_match); + i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), + pf->stat_offsets_loaded, + &osd->fd_sb_match, &nsd->fd_sb_match); + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + nsd->tx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + nsd->rx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + i40e_stat_update32(hw, I40E_PRTPM_TLPIC, + pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count); + i40e_stat_update32(hw, I40E_PRTPM_RLPIC, + pf->stat_offsets_loaded, + &osd->rx_lpi_count, &nsd->rx_lpi_count); + + pf->stat_offsets_loaded = true; +} + +/** + * i40e_update_stats - 
Update the various statistics counters. + * @vsi: the VSI to be updated + * + * Update the various stats for this VSI and its related entities. + **/ +void i40e_update_stats(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + if (vsi == pf->vsi[pf->lan_vsi]) + i40e_update_pf_stats(pf); + + i40e_update_vsi_stats(vsi); +#ifdef I40E_FCOE + i40e_update_fcoe_stats(vsi); +#endif +} + +/** + * i40e_find_filter - Search VSI filter list for specific mac/vlan filter + * @vsi: the VSI to be searched + * @macaddr: the MAC address + * @vlan: the vlan + * @is_vf: make sure its a VF filter, else doesn't matter + * @is_netdev: make sure its a netdev filter, else doesn't matter + * + * Returns ptr to the filter object or NULL + **/ +static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return NULL; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (vlan == f->vlan) && + (!is_vf || f->is_vf) && + (!is_netdev || f->is_netdev)) + return f; + } + return NULL; +} + +/** + * i40e_find_mac - Find a mac addr in the macvlan filters list + * @vsi: the VSI to be searched + * @macaddr: the MAC address we are searching for + * @is_vf: make sure its a VF filter, else doesn't matter + * @is_netdev: make sure its a netdev filter, else doesn't matter + * + * Returns the first filter with the provided MAC address or NULL if + * MAC address was not found + **/ +struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return NULL; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (!is_vf || f->is_vf) && + (!is_netdev || f->is_netdev)) + return f; + } + return NULL; +} + +/** + * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode + * @vsi: the VSI to be searched + * + * Returns true if VSI is in vlan mode or false otherwise + **/ +bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + + /* Only -1 for all the filters denotes not in vlan mode + * so we have to go through all the list in order to make sure + */ + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->vlan >= 0) + return true; + } + + return false; +} + +/** + * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans + * @vsi: the VSI to be searched + * @macaddr: the mac address to be filtered + * @is_vf: true if it is a VF + * @is_netdev: true if it is a netdev + * + * Goes through all the macvlan filters and adds a + * macvlan filter for each unique vlan that already exists + * + * Returns first filter found on success, else NULL + **/ +struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (!i40e_find_filter(vsi, macaddr, f->vlan, + is_vf, is_netdev)) { + if (!i40e_add_filter(vsi, macaddr, f->vlan, + is_vf, is_netdev)) + return NULL; + } + } + + return list_first_entry_or_null(&vsi->mac_filter_list, + struct i40e_mac_filter, list); +} + +/** + * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM + * @vsi: the PF Main VSI - inappropriate for any other VSI + * @macaddr: the MAC address + * + * Some older firmware configurations set up a default promiscuous VLAN + * filter that needs to be 
removed. + **/ +static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +{ + struct i40e_aqc_remove_macvlan_element_data element; + struct i40e_pf *pf = vsi->back; + i40e_status aq_ret; + + /* Only appropriate for the PF main VSI */ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.vlan_tag = 0; + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + if (aq_ret) + return -ENOENT; + + return 0; +} + +/** + * i40e_add_filter - Add a mac/vlan filter to the VSI + * @vsi: the VSI to be searched + * @macaddr: the MAC address + * @vlan: the vlan + * @is_vf: make sure its a VF filter, else doesn't matter + * @is_netdev: make sure its a netdev filter, else doesn't matter + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return NULL; + + f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto add_filter_out; + + ether_addr_copy(f->macaddr, macaddr); + f->vlan = vlan; + f->changed = true; + + INIT_LIST_HEAD(&f->list); + list_add(&f->list, &vsi->mac_filter_list); + } + + /* increment counter and add a new flag if needed */ + if (is_vf) { + if (!f->is_vf) { + f->is_vf = true; + f->counter++; + } + } else if (is_netdev) { + if (!f->is_netdev) { + f->is_netdev = true; + f->counter++; + } + } else { + f->counter++; + } + + /* changed tells sync_filters_subtask to + * push the filter down to the firmware + */ + if (f->changed) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } + +add_filter_out: + return f; +} + +/** + * i40e_del_filter - Remove a mac/vlan filter from the VSI + * @vsi: the VSI to be searched + * @macaddr: the MAC address + * @vlan: the vlan + * @is_vf: make sure it's a VF filter, else doesn't matter + * @is_netdev: make sure it's a netdev filter, else doesn't matter + **/ +void i40e_del_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return; + + f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); + if (!f || f->counter == 0) + return; + + if (is_vf) { + if (f->is_vf) { + f->is_vf = false; + f->counter--; + } + } else if (is_netdev) { + if (f->is_netdev) { + f->is_netdev = false; + f->counter--; + } + } else { + /* make sure we don't remove a filter in use by VF or netdev */ + int min_f = 0; + min_f += (f->is_vf ? 1 : 0); + min_f += (f->is_netdev ? 
1 : 0); + + if (f->counter > min_f) + f->counter--; + } + + /* counter == 0 tells sync_filters_subtask to + * remove the filter from the firmware's list + */ + if (f->counter == 0) { + f->changed = true; + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } +} + +/** + * i40e_set_mac - NDO callback to set mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +#ifdef I40E_FCOE +int i40e_set_mac(struct net_device *netdev, void *p) +#else +static int i40e_set_mac(struct net_device *netdev, void *p) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct sockaddr *addr = p; + struct i40e_mac_filter *f; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + addr->sa_data); + return 0; + } + + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(hw->mac.addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", + hw->mac.addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + + if (vsi->type == I40E_VSI_MAIN) { + i40e_status ret; + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + addr->sa_data, NULL); + if (ret) { + netdev_info(netdev, + "Addr change for Main VSI failed: %d\n", + ret); + return -EADDRNOTAVAIL; + } + } + + if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { + struct i40e_aqc_remove_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, netdev->dev_addr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { + i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, + false, false); + } + + if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { + struct i40e_aqc_add_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, hw->mac.addr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { + f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, + false, false); + if (f) + f->is_laa = true; + } + + i40e_sync_vsi_filters(vsi); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + return 0; +} + +/** + * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc + * @vsi: the VSI being setup + * @ctxt: VSI context structure + * @enabled_tc: Enabled TCs bitmap + * @is_add: True if called before Add VSI + * + * Setup VSI queue mapping for enabled traffic classes. 
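+ *
+ * Each enabled TC gets a contiguous queue region described by an offset and
+ * a power-of-two queue count, packed into that TC's qmap word. Purely as an
+ * illustration (example values, not taken from this patch): a TC at offset 0
+ * with 8 queue pairs uses pow = 3, so
+ * qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ *        (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).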
+ **/ +#ifdef I40E_FCOE +void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc, + bool is_add) +#else +static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc, + bool is_add) +#endif +{ + struct i40e_pf *pf = vsi->back; + u16 sections = 0; + u8 netdev_tc = 0; + u16 numtc = 0; + u16 qcount; + u8 offset; + u16 qmap; + int i; + u16 num_tc_qps = 0; + + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; + offset = 0; + + if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Find numtc from enabled TC bitmap */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) /* TC is enabled */ + numtc++; + } + if (!numtc) { + dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); + numtc = 1; + } + } else { + /* At least TC0 is enabled in case of non-DCB case */ + numtc = 1; + } + + vsi->tc_config.numtc = numtc; + vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; + /* Number of queues per enabled TC */ + /* In MFP case we can have a much lower count of MSIx + * vectors available and so we need to lower the used + * q count. + */ + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + num_tc_qps = qcount / numtc; + num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); + + /* Setup queue offset/count for all TCs for given VSI */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + /* See if the given TC is enabled for the given VSI */ + if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ + int pow, num_qps; + + switch (vsi->type) { + case I40E_VSI_MAIN: + qcount = min_t(int, pf->rss_size, num_tc_qps); + break; +#ifdef I40E_FCOE + case I40E_VSI_FCOE: + qcount = num_tc_qps; + break; +#endif + case I40E_VSI_FDIR: + case I40E_VSI_SRIOV: + case I40E_VSI_VMDQ2: + default: + qcount = num_tc_qps; + WARN_ON(i != 0); + break; + } + vsi->tc_config.tc_info[i].qoffset = offset; + vsi->tc_config.tc_info[i].qcount = qcount; + + /* find the next higher power-of-2 of num queue pairs */ + num_qps = qcount; + pow = 0; + while (num_qps && ((1 << pow) < qcount)) { + pow++; + num_qps >>= 1; + } + + vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; + qmap = + (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); + + offset += qcount; + } else { + /* TC is not enabled so set the offset to + * default queue and allocate one queue + * for the given TC. 
+ */ + vsi->tc_config.tc_info[i].qoffset = 0; + vsi->tc_config.tc_info[i].qcount = 1; + vsi->tc_config.tc_info[i].netdev_tc = 0; + + qmap = 0; + } + ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); + } + + /* Set actual Tx/Rx queue pairs */ + vsi->num_queue_pairs = offset; + if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else + vsi->num_queue_pairs = pf->num_lan_msix; + } + + /* Scheduler section valid can only be set for ADD VSI */ + if (is_add) { + sections |= I40E_AQ_VSI_PROP_SCHED_VALID; + + ctxt->info.up_enable_bits = enabled_tc; + } + if (vsi->type == I40E_VSI_SRIOV) { + ctxt->info.mapping_flags |= + cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + for (i = 0; i < vsi->num_queue_pairs; i++) + ctxt->info.queue_mapping[i] = + cpu_to_le16(vsi->base_queue + i); + } else { + ctxt->info.mapping_flags |= + cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); + ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); + } + ctxt->info.valid_sections |= cpu_to_le16(sections); +} + +/** + * i40e_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +#ifdef I40E_FCOE +void i40e_set_rx_mode(struct net_device *netdev) +#else +static void i40e_set_rx_mode(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_mac_filter *f, *ftmp; + struct i40e_vsi *vsi = np->vsi; + struct netdev_hw_addr *uca; + struct netdev_hw_addr *mca; + struct netdev_hw_addr *ha; + + /* add addr if not already in the filter list */ + netdev_for_each_uc_addr(uca, netdev) { + if (!i40e_find_mac(vsi, uca->addr, false, true)) { + if (i40e_is_vsi_in_vlan(vsi)) + i40e_put_mac_in_vlan(vsi, uca->addr, + false, true); + else + i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY, + false, true); + } + } + + netdev_for_each_mc_addr(mca, netdev) { + if (!i40e_find_mac(vsi, mca->addr, false, true)) { + if (i40e_is_vsi_in_vlan(vsi)) + i40e_put_mac_in_vlan(vsi, mca->addr, + false, true); + else + i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY, + false, true); + } + } + + /* remove filter if not in netdev list */ + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + bool found = false; + + if (!f->is_netdev) + continue; + + if (is_multicast_ether_addr(f->macaddr)) { + netdev_for_each_mc_addr(mca, netdev) { + if (ether_addr_equal(mca->addr, f->macaddr)) { + found = true; + break; + } + } + } else { + netdev_for_each_uc_addr(uca, netdev) { + if (ether_addr_equal(uca->addr, f->macaddr)) { + found = true; + break; + } + } + + for_each_dev_addr(netdev, ha) { + if (ether_addr_equal(ha->addr, f->macaddr)) { + found = true; + break; + } + } + } + if (!found) + i40e_del_filter( + vsi, f->macaddr, I40E_VLAN_ANY, false, true); + } + + /* check for other flag changes */ + if (vsi->current_netdev_flags != vsi->netdev->flags) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } +} + +/** + * i40e_sync_vsi_filters - Update the VSI filter list to the HW + * @vsi: ptr to the VSI + * + * Push any outstanding VSI filter changes through the AdminQ. 
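+ *
+ * Deletions are batched into remove_macvlan AQ requests and additions into
+ * add_macvlan requests, flushing each buffer whenever it fills. If an add
+ * fails because the hardware filter table is full, the VSI is forced into
+ * promiscuous mode as a fallback.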
+ * + * Returns 0 or error value + **/ +int i40e_sync_vsi_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + bool promisc_forced_on = false; + bool add_happened = false; + int filter_list_len = 0; + u32 changed_flags = 0; + i40e_status aq_ret = 0; + struct i40e_pf *pf; + int num_add = 0; + int num_del = 0; + u16 cmd_flags; + + /* empty array typed pointers, kcalloc later */ + struct i40e_aqc_add_macvlan_element_data *add_list; + struct i40e_aqc_remove_macvlan_element_data *del_list; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) + usleep_range(1000, 2000); + pf = vsi->back; + + if (vsi->netdev) { + changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; + vsi->current_netdev_flags = vsi->netdev->flags; + } + + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { + vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; + + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_remove_macvlan_element_data), + GFP_KERNEL); + if (!del_list) + return -ENOMEM; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter != 0) + continue; + f->changed = false; + cmd_flags = 0; + + /* add to delete list */ + ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); + del_list[num_del].vlan_tag = + cpu_to_le16((u16)(f->vlan == + I40E_VLAN_ANY ? 0 : f->vlan)); + + cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + del_list[num_del].flags = cmd_flags; + num_del++; + + /* unlink from filter list */ + list_del(&f->list); + kfree(f); + + /* flush a full buffer */ + if (num_del == filter_list_len) { + aq_ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, del_list, num_del, + NULL); + num_del = 0; + memset(del_list, 0, sizeof(*del_list)); + + if (aq_ret && + pf->hw.aq.asq_last_status != + I40E_AQ_RC_ENOENT) + dev_info(&pf->pdev->dev, + "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", + aq_ret, + pf->hw.aq.asq_last_status); + } + } + if (num_del) { + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + del_list, num_del, NULL); + num_del = 0; + + if (aq_ret && + pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) + dev_info(&pf->pdev->dev, + "ignoring delete macvlan error, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + } + + kfree(del_list); + del_list = NULL; + + /* do all the adds now */ + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_add_macvlan_element_data), + add_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_add_macvlan_element_data), + GFP_KERNEL); + if (!add_list) + return -ENOMEM; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter == 0) + continue; + f->changed = false; + add_happened = true; + cmd_flags = 0; + + /* add to add array */ + ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); + add_list[num_add].vlan_tag = + cpu_to_le16( + (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan)); + add_list[num_add].queue_number = 0; + + cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; + add_list[num_add].flags = cpu_to_le16(cmd_flags); + num_add++; + + /* flush a full buffer */ + if (num_add == filter_list_len) { + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); + num_add = 0; + + if (aq_ret) + break; + memset(add_list, 0, sizeof(*add_list)); + } + } + if (num_add) { + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); + num_add = 0; + } + kfree(add_list); + add_list = NULL; + + if (add_happened && aq_ret && + pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { + dev_info(&pf->pdev->dev, + "add filter failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && + !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state)) { + promisc_forced_on = true; + set_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state); + dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); + } + } + } + + /* check for changes in promiscuous modes */ + if (changed_flags & IFF_ALLMULTI) { + bool cur_multipromisc; + cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set multi promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + } + if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { + bool cur_promisc; + cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || + test_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state)); + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set uni promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set brdcast promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + } + + clear_bit(__I40E_CONFIG_BUSY, &vsi->state); + return 0; +} + +/** + * i40e_sync_filters_subtask - Sync the VSI filter list with HW + * @pf: board private structure + **/ +static void i40e_sync_filters_subtask(struct i40e_pf *pf) +{ + int v; + + if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) + return; + pf->flags &= ~I40E_FLAG_FILTER_SYNC; + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v] && + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) + i40e_sync_vsi_filters(pf->vsi[v]); + } +} + +/** + * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int i40e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct i40e_vsi *vsi = np->vsi; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) + return -EINVAL; + + netdev_info(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + if (netif_running(netdev)) + i40e_vsi_reinit_locked(vsi); + + return 0; +} + +/** + * i40e_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command 
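+ *
+ * Only the hardware timestamping ioctls (SIOCGHWTSTAMP/SIOCSHWTSTAMP) are
+ * handled here; any other command returns -EOPNOTSUPP. A rough sketch of the
+ * corresponding userspace call (interface name and socket are assumed, this
+ * snippet is not part of the patch):
+ *   struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON };
+ *   struct ifreq ifr = { 0 };
+ *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *   ifr.ifr_data = (void *)&cfg;
+ *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);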
+ **/ +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return i40e_ptp_get_ts_config(pf, ifr); + case SIOCSHWTSTAMP: + return i40e_ptp_set_ts_config(pf, ifr); + default: + return -EOPNOTSUPP; + } +} + +/** + * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI + * @vsi: the vsi being adjusted + **/ +void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) +{ + struct i40e_vsi_context ctxt; + i40e_status ret; + + if ((vsi->info.valid_sections & + cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && + ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) + return; /* already enabled */ + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "%s: update vsi failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI + * @vsi: the vsi being adjusted + **/ +void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) +{ + struct i40e_vsi_context ctxt; + i40e_status ret; + + if ((vsi->info.valid_sections & + cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && + ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == + I40E_AQ_VSI_PVLAN_EMOD_MASK)) + return; /* already disabled */ + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_NOTHING; + + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "%s: update vsi failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_vlan_rx_register - Setup or shutdown vlan offload + * @netdev: network interface to be adjusted + * @features: netdev features to test if VLAN offload is enabled or not + **/ +static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); +} + +/** + * i40e_vsi_add_vlan - Add vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be added (0 = untagged only , -1 = any) + **/ +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) +{ + struct i40e_mac_filter *f, *add_f; + bool is_netdev, is_vf; + + is_vf = (vsi->type == I40E_VSI_SRIOV); + is_netdev = !!(vsi->netdev); + + if (is_netdev) { + add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, + is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add vlan filter %d for %pM\n", + vid, vsi->netdev->dev_addr); + return -ENOMEM; + } + } + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add vlan filter %d for %pM\n", + vid, f->macaddr); + return -ENOMEM; + } + } + + /* Now if we add a vlan tag, make sure to check if it is the first + * tag (i.e. 
a "tag" -1 does exist) and if so replace the -1 "tag" + * with 0, so we now accept untagged and specified tagged traffic + * (and not any taged and untagged) + */ + if (vid > 0) { + if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, + I40E_VLAN_ANY, + is_vf, is_netdev)) { + i40e_del_filter(vsi, vsi->netdev->dev_addr, + I40E_VLAN_ANY, is_vf, is_netdev); + add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, + is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + vsi->netdev->dev_addr); + return -ENOMEM; + } + } + } + + /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ + if (vid > 0 && !vsi->info.pvid) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev)) { + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, + 0, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + f->macaddr); + return -ENOMEM; + } + } + } + } + + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return 0; + + return i40e_sync_vsi_filters(vsi); +} + +/** + * i40e_vsi_kill_vlan - Remove vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be removed (0 = untagged only , -1 = any) + * + * Return: 0 on success or negative otherwise + **/ +int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) +{ + struct net_device *netdev = vsi->netdev; + struct i40e_mac_filter *f, *add_f; + bool is_vf, is_netdev; + int filter_count = 0; + + is_vf = (vsi->type == I40E_VSI_SRIOV); + is_netdev = !!(netdev); + + if (is_netdev) + i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); + + list_for_each_entry(f, &vsi->mac_filter_list, list) + i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); + + /* go through all the filters for this VSI and if there is only + * vid == 0 it means there are no other filters, so vid 0 must + * be replaced with -1. 
This signifies that we should from now + * on accept any traffic (with any tag present, or untagged) + */ + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (is_netdev) { + if (f->vlan && + ether_addr_equal(netdev->dev_addr, f->macaddr)) + filter_count++; + } + + if (f->vlan) + filter_count++; + } + + if (!filter_count && is_netdev) { + i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); + f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, + is_vf, is_netdev); + if (!f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter %d for %pM\n", + I40E_VLAN_ANY, netdev->dev_addr); + return -ENOMEM; + } + } + + if (!filter_count) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter %d for %pM\n", + I40E_VLAN_ANY, f->macaddr); + return -ENOMEM; + } + } + } + + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return 0; + + return i40e_sync_vsi_filters(vsi); +} + +/** + * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload + * @netdev: network interface to be adjusted + * @vid: vlan id to be added + * + * net_device_ops implementation for adding vlan ids + **/ +#ifdef I40E_FCOE +int i40e_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int i40e_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + int ret = 0; + + if (vid > 4095) + return -EINVAL; + + netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); + + /* If the network stack called us with vid = 0 then + * it is asking to receive priority tagged packets with + * vlan id 0. Our HW receives them by default when configured + * to receive untagged packets so there is no need to add an + * extra filter for vlan 0 tagged packets. 
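+ * For vid = 0 the code below therefore only records the vlan in
+ * active_vlans and skips i40e_vsi_add_vlan() entirely.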
+ */ + if (vid) + ret = i40e_vsi_add_vlan(vsi, vid); + + if (!ret && (vid < VLAN_N_VID)) + set_bit(vid, vsi->active_vlans); + + return ret; +} + +/** + * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload + * @netdev: network interface to be adjusted + * @vid: vlan id to be removed + * + * net_device_ops implementation for removing vlan ids + **/ +#ifdef I40E_FCOE +int i40e_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int i40e_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); + + /* return code is ignored as there is nothing a user + * can do about failure to remove and a log message was + * already printed from the other function + */ + i40e_vsi_kill_vlan(vsi, vid); + + clear_bit(vid, vsi->active_vlans); + + return 0; +} + +/** + * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up + * @vsi: the vsi being brought back up + **/ +static void i40e_restore_vlan(struct i40e_vsi *vsi) +{ + u16 vid; + + if (!vsi->netdev) + return; + + i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); + + for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) + i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), + vid); +} + +/** + * i40e_vsi_add_pvid - Add pvid for the VSI + * @vsi: the vsi being adjusted + * @vid: the vlan id to set as a PVID + **/ +int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) +{ + struct i40e_vsi_context ctxt; + i40e_status aq_ret; + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.pvid = cpu_to_le16(vid); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | + I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_EMOD_STR; + + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&vsi->back->pdev->dev, + "%s: update vsi failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + return -ENOENT; + } + + return 0; +} + +/** + * i40e_vsi_remove_pvid - Remove the pvid from the VSI + * @vsi: the vsi being adjusted + * + * Just use the vlan_rx_register() service to put it back to normal + **/ +void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) +{ + i40e_vlan_stripping_disable(vsi); + + vsi->info.pvid = 0; +} + +/** + * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources + * @vsi: ptr to the VSI + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
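+ * (i40e_vsi_free_tx_resources() below can be used for that; it skips any
+ * ring whose descriptors were never allocated.)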
+ * + * Return 0 on success, negative on failure + **/ +static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) +{ + int i, err = 0; + + for (i = 0; i < vsi->num_queue_pairs && !err; i++) + err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); + + return err; +} + +/** + * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues + * @vsi: ptr to the VSI + * + * Free VSI's transmit software resources + **/ +static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) +{ + int i; + + if (!vsi->tx_rings) + return; + + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + i40e_free_tx_resources(vsi->tx_rings[i]); +} + +/** + * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources + * @vsi: ptr to the VSI + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) +{ + int i, err = 0; + + for (i = 0; i < vsi->num_queue_pairs && !err; i++) + err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); +#ifdef I40E_FCOE + i40e_fcoe_setup_ddp_resources(vsi); +#endif + return err; +} + +/** + * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues + * @vsi: ptr to the VSI + * + * Free all receive software resources + **/ +static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) +{ + int i; + + if (!vsi->rx_rings) + return; + + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) + i40e_free_rx_resources(vsi->rx_rings[i]); +#ifdef I40E_FCOE + i40e_fcoe_free_ddp_resources(vsi); +#endif +} + +/** + * i40e_config_xps_tx_ring - Configure XPS for a Tx ring + * @ring: The Tx ring to configure + * + * This enables/disables XPS for a given Tx descriptor ring + * based on the TCs enabled for the VSI that ring belongs to. + **/ +static void i40e_config_xps_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + cpumask_var_t mask; + + if (!ring->q_vector || !ring->netdev) + return; + + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1) { + if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, ring->queue_index); + free_cpumask_var(mask); + } +} + +/** + * i40e_configure_tx_ring - Configure a transmit ring context and rest + * @ring: The Tx ring to configure + * + * Configure the Tx descriptor ring in the HMC context. 
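+ *
+ * The ring's DMA base, length and head write-back address are loaded into an
+ * i40e_hmc_obj_txq context, which is cleared and then programmed through
+ * i40e_clear_lan_tx_queue_context()/i40e_set_lan_tx_queue_context(); the
+ * queue is finally tied to this PF (or VM, for VMDq) via I40E_QTX_CTL.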
+ **/ +static int i40e_configure_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + u16 pf_q = vsi->base_queue + ring->queue_index; + struct i40e_hw *hw = &vsi->back->hw; + struct i40e_hmc_obj_txq tx_ctx; + i40e_status err = 0; + u32 qtx_ctl = 0; + + /* some ATR related tx ring init */ + if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { + ring->atr_sample_rate = vsi->back->atr_sample_rate; + ring->atr_count = 0; + } else { + ring->atr_sample_rate = 0; + } + + /* configure XPS */ + i40e_config_xps_tx_ring(ring); + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(tx_ctx)); + + tx_ctx.new_context = 1; + tx_ctx.base = (ring->dma / 128); + tx_ctx.qlen = ring->count; + tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED)); +#ifdef I40E_FCOE + tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); +#endif + tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + /* FDIR VSI tx ring can still use RS bit and writebacks */ + if (vsi->type != I40E_VSI_FDIR) + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = ring->dma + + (ring->count * sizeof(struct i40e_tx_desc)); + + /* As part of VSI creation/update, FW allocates certain + * Tx arbitration queue sets for each TC enabled for + * the VSI. The FW returns the handles to these queue + * sets as part of the response buffer to Add VSI, + * Update VSI, etc. AQ commands. It is expected that + * these queue set handles be associated with the Tx + * queues by the driver as part of the TX queue context + * initialization. This has to be done regardless of + * DCB as by default everything is mapped to TC0. + */ + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); + tx_ctx.rdylist_act = 0; + + /* clear the context in the HMC */ + err = i40e_clear_lan_tx_queue_context(hw, pf_q); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* set the context in the HMC */ + err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* Now associate this queue with this PCI function */ + if (vsi->type == I40E_VSI_VMDQ2) { + qtx_ctl = I40E_QTX_CTL_VM_QUEUE; + qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; + } else { + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + } + + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & + I40E_QTX_CTL_PF_INDX_MASK); + wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); + i40e_flush(hw); + + clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); + + /* cache tail off for easier writes later */ + ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); + + return 0; +} + +/** + * i40e_configure_rx_ring - Configure a receive ring context + * @ring: The Rx ring to configure + * + * Configure the Rx descriptor ring in the HMC context. 
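+ *
+ * Buffer and header sizes come from the owning VSI, header split is enabled
+ * whenever vsi->dtype is non-zero, and the tail register is cached and
+ * zeroed before the initial receive buffers are posted.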
+ **/ +static int i40e_configure_rx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; + u16 pf_q = vsi->base_queue + ring->queue_index; + struct i40e_hw *hw = &vsi->back->hw; + struct i40e_hmc_obj_rxq rx_ctx; + i40e_status err = 0; + + ring->state = 0; + + /* clear the context structure first */ + memset(&rx_ctx, 0, sizeof(rx_ctx)); + + ring->rx_buf_len = vsi->rx_buf_len; + ring->rx_hdr_len = vsi->rx_hdr_len; + + rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; + + rx_ctx.base = (ring->dma / 128); + rx_ctx.qlen = ring->count; + + if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { + set_ring_16byte_desc_enabled(ring); + rx_ctx.dsize = 0; + } else { + rx_ctx.dsize = 1; + } + + rx_ctx.dtype = vsi->dtype; + if (vsi->dtype) { + set_ring_ps_enabled(ring); + rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | + I40E_RX_SPLIT_IP | + I40E_RX_SPLIT_TCP_UDP | + I40E_RX_SPLIT_SCTP; + } else { + rx_ctx.hsplit_0 = 0; + } + + rx_ctx.rxmax = min_t(u16, vsi->max_frame, + (chain_len * ring->rx_buf_len)); + if (hw->revision_id == 0) + rx_ctx.lrxqthresh = 0; + else + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = 1; + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 1; +#ifdef I40E_FCOE + rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); +#endif + /* set the prefena field to 1 because the manual says to */ + rx_ctx.prefena = 1; + + /* clear the context in the HMC */ + err = i40e_clear_lan_rx_queue_context(hw, pf_q); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* set the context in the HMC */ + err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); + writel(0, ring->tail); + + if (ring_is_ps_enabled(ring)) { + i40e_alloc_rx_headers(ring); + i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); + } else { + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + } + + return 0; +} + +/** + * i40e_vsi_configure_tx - Configure the VSI for Tx + * @vsi: VSI structure describing this set of rings and resources + * + * Configure the Tx VSI for operation. + **/ +static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) +{ + int err = 0; + u16 i; + + for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) + err = i40e_configure_tx_ring(vsi->tx_rings[i]); + + return err; +} + +/** + * i40e_vsi_configure_rx - Configure the VSI for Rx + * @vsi: the VSI being configured + * + * Configure the Rx VSI for operation. 
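+ *
+ * The PF flags select between single-buffer and packet-split receive modes,
+ * and the resulting header/data buffer lengths are rounded up to the
+ * granularity the Rx queue context expects (the HBUFF/DBUFF shifts).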
+ **/ +static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) +{ + int err = 0; + u16 i; + + if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) + vsi->max_frame = vsi->netdev->mtu + ETH_HLEN + + ETH_FCS_LEN + VLAN_HLEN; + else + vsi->max_frame = I40E_RXBUFFER_2048; + + /* figure out correct receive buffer length */ + switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | + I40E_FLAG_RX_PS_ENABLED)) { + case I40E_FLAG_RX_1BUF_ENABLED: + vsi->rx_hdr_len = 0; + vsi->rx_buf_len = vsi->max_frame; + vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; + break; + case I40E_FLAG_RX_PS_ENABLED: + vsi->rx_hdr_len = I40E_RX_HDR_SIZE; + vsi->rx_buf_len = I40E_RXBUFFER_2048; + vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; + break; + default: + vsi->rx_hdr_len = I40E_RX_HDR_SIZE; + vsi->rx_buf_len = I40E_RXBUFFER_2048; + vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; + break; + } + +#ifdef I40E_FCOE + /* setup rx buffer for FCoE */ + if ((vsi->type == I40E_VSI_FCOE) && + (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { + vsi->rx_hdr_len = 0; + vsi->rx_buf_len = I40E_RXBUFFER_3072; + vsi->max_frame = I40E_RXBUFFER_3072; + vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; + } + +#endif /* I40E_FCOE */ + /* round up for the chip's needs */ + vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, + (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + + /* set up individual rings */ + for (i = 0; i < vsi->num_queue_pairs && !err; i++) + err = i40e_configure_rx_ring(vsi->rx_rings[i]); + + return err; +} + +/** + * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC + * @vsi: ptr to the VSI + **/ +static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring, *rx_ring; + u16 qoffset, qcount; + int i, n; + + if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Reset the TC information */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = 0; + tx_ring->dcb_tc = 0; + } + } + + for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { + if (!(vsi->tc_config.enabled_tc & (1 << n))) + continue; + + qoffset = vsi->tc_config.tc_info[n].qoffset; + qcount = vsi->tc_config.tc_info[n].qcount; + for (i = qoffset; i < (qoffset + qcount); i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = n; + tx_ring->dcb_tc = n; + } + } +} + +/** + * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI + * @vsi: ptr to the VSI + **/ +static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) +{ + if (vsi->netdev) + i40e_set_rx_mode(vsi->netdev); +} + +/** + * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters + * @vsi: Pointer to the targeted VSI + * + * This function replays the hlist on the hw where all the SB Flow Director + * filters were saved. 
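+ * Each saved filter is simply re-added through i40e_add_del_fdir().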
+ **/ +static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) +{ + struct i40e_fdir_filter *filter; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + + hlist_for_each_entry_safe(filter, node, + &pf->fdir_filter_list, fdir_node) { + i40e_add_del_fdir(vsi, filter, true); + } +} + +/** + * i40e_vsi_configure - Set up the VSI for action + * @vsi: the VSI being configured + **/ +static int i40e_vsi_configure(struct i40e_vsi *vsi) +{ + int err; + + i40e_set_vsi_rx_mode(vsi); + i40e_restore_vlan(vsi); + i40e_vsi_config_dcb_rings(vsi); + err = i40e_vsi_configure_tx(vsi); + if (!err) + err = i40e_vsi_configure_rx(vsi); + + return err; +} + +/** + * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW + * @vsi: the VSI being configured + **/ +static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_q_vector *q_vector; + struct i40e_hw *hw = &pf->hw; + u16 vector; + int i, q; + u32 val; + u32 qp; + + /* The interrupt indexing is offset by 1 in the PFINT_ITRn + * and PFINT_LNKLSTn registers, e.g.: + * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) + */ + qp = vsi->base_queue; + vector = vsi->base_vector; + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + q_vector = vsi->q_vectors[i]; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), + q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), + q_vector->tx.itr); + + /* Linked list for the queuepairs assigned to this vector */ + wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); + for (q = 0; q < q_vector->num_ringpairs; q++) { + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | + (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | + (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | + (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| + (I40E_QUEUE_TYPE_TX + << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); + + wr32(hw, I40E_QINT_RQCTL(qp), val); + + val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | + (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | + (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | + ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| + (I40E_QUEUE_TYPE_RX + << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); + + /* Terminate the linked list */ + if (q == (q_vector->num_ringpairs - 1)) + val |= (I40E_QUEUE_END_OF_LIST + << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); + + wr32(hw, I40E_QINT_TQCTL(qp), val); + qp++; + } + } + + i40e_flush(hw); +} + +/** + * i40e_enable_misc_int_causes - enable the non-queue interrupts + * @hw: ptr to the hardware info + **/ +static void i40e_enable_misc_int_causes(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 val; + + /* clear things first */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ + rd32(hw, I40E_PFINT_ICR0); /* read to clear */ + + val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | + I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | + I40E_PFINT_ICR0_ENA_GRST_MASK | + I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | + I40E_PFINT_ICR0_ENA_GPIO_MASK | + I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | + I40E_PFINT_ICR0_ENA_VFLR_MASK | + I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + + if (pf->flags & I40E_FLAG_PTP) + val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + + wr32(hw, I40E_PFINT_ICR0_ENA, val); + + /* SW_ITR_IDX = 0, but don't change INTENA */ + wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | + 
I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); + + /* OTHER_ITR_IDX = 0 */ + wr32(hw, I40E_PFINT_STAT_CTL0, 0); +} + +/** + * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW + * @vsi: the VSI being configured + **/ +static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + /* set the ITR configuration */ + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); + + i40e_enable_misc_int_causes(pf); + + /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ + wr32(hw, I40E_PFINT_LNKLST0, 0); + + /* Associate the queue pair to the vector and enable the queue int */ + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | + (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | + (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); + + wr32(hw, I40E_QINT_RQCTL(0), val); + + val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | + (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | + (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); + + wr32(hw, I40E_QINT_TQCTL(0), val); + i40e_flush(hw); +} + +/** + * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 + * @pf: board private structure + **/ +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + + wr32(hw, I40E_PFINT_DYN_CTL0, + I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + i40e_flush(hw); +} + +/** + * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 + * @pf: board private structure + **/ +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); + + wr32(hw, I40E_PFINT_DYN_CTL0, val); + i40e_flush(hw); +} + +/** + * i40e_irq_dynamic_enable - Enable default interrupt generation settings + * @vsi: pointer to a vsi + * @vector: enable a particular Hw Interrupt vector + **/ +void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); + /* skip the flush */ +} + +/** + * i40e_irq_dynamic_disable - Disable default interrupt generation settings + * @vsi: pointer to a vsi + * @vector: disable a particular Hw Interrupt vector + **/ +void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); + i40e_flush(hw); +} + +/** + * i40e_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40e_msix_clean_rings(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * i40e_vsi_request_irq_msix - Initialize 
MSI-X interrupts + * @vsi: the VSI being configured + * @basename: name for the vector + * + * Allocates MSI-X vectors and requests interrupts from the kernel. + **/ +static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) +{ + int q_vectors = vsi->num_q_vectors; + struct i40e_pf *pf = vsi->back; + int base = vsi->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + + for (vector = 0; vector < q_vectors; vector++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(pf->msix_entries[base + vector].vector, + vsi->irq_handler, + 0, + q_vector->name, + q_vector); + if (err) { + dev_info(&pf->pdev->dev, + "%s: request_irq failed, error: %d\n", + __func__, err); + goto free_queue_irqs; + } + /* assign the mask for this irq */ + irq_set_affinity_hint(pf->msix_entries[base + vector].vector, + &q_vector->affinity_mask); + } + + vsi->irqs_ready = true; + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint(pf->msix_entries[base + vector].vector, + NULL); + free_irq(pf->msix_entries[base + vector].vector, + &(vsi->q_vectors[vector])); + } + return err; +} + +/** + * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI + * @vsi: the VSI being un-configured + **/ +static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int base = vsi->base_vector; + int i; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); + wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); + } + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + for (i = vsi->base_vector; + i < (vsi->num_q_vectors + vsi->base_vector); i++) + wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); + + i40e_flush(hw); + for (i = 0; i < vsi->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } else { + /* Legacy and MSI mode - this stops all interrupt handling */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + wr32(hw, I40E_PFINT_DYN_CTL0, 0); + i40e_flush(hw); + synchronize_irq(pf->pdev->irq); + } +} + +/** + * i40e_vsi_enable_irq - Enable IRQ for the given VSI + * @vsi: the VSI being configured + **/ +static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + for (i = vsi->base_vector; + i < (vsi->num_q_vectors + vsi->base_vector); i++) + i40e_irq_dynamic_enable(vsi, i); + } else { + i40e_irq_dynamic_enable_icr0(pf); + } + + i40e_flush(&pf->hw); + return 0; +} + +/** + * i40e_stop_misc_vector - Stop the vector that handles non-queue events + * @pf: board private structure + **/ +static void i40e_stop_misc_vector(struct i40e_pf *pf) +{ + /* Disable ICR 0 */ + wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); + i40e_flush(&pf->hw); +} + +/** + * i40e_intr - MSI/Legacy and non-queue interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + * + * This is the handler used for all MSI/Legacy interrupts, and deals + * with both 
queue and non-queue interrupts. This is also used in + * MSIX mode to handle the non-queue interrupts. + **/ +static irqreturn_t i40e_intr(int irq, void *data) +{ + struct i40e_pf *pf = (struct i40e_pf *)data; + struct i40e_hw *hw = &pf->hw; + irqreturn_t ret = IRQ_NONE; + u32 icr0, icr0_remaining; + u32 val, ena_mask; + + icr0 = rd32(hw, I40E_PFINT_ICR0); + ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); + + /* if sharing a legacy IRQ, we might get called w/o an intr pending */ + if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) + goto enable_intr; + + /* if interrupt but no bits showing, must be SWINT */ + if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || + (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) + pf->sw_int_count++; + + /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ + if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { + + /* temporarily disable queue cause for NAPI processing */ + u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); + qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_RQCTL(0), qval); + + qval = rd32(hw, I40E_QINT_TQCTL(0)); + qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_TQCTL(0), qval); + + if (!test_bit(__I40E_DOWN, &pf->state)) + napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); + } + + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + } + + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; + set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + } + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; + set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + } + + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); + ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; + val = rd32(hw, I40E_GLGEN_RSTAT); + val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) + >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; + if (val == I40E_RESET_CORER) { + pf->corer_count++; + } else if (val == I40E_RESET_GLOBR) { + pf->globr_count++; + } else if (val == I40E_RESET_EMPR) { + pf->empr_count++; + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); + } + } + + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { + icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; + dev_info(&pf->pdev->dev, "HMC error interrupt\n"); + dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", + rd32(hw, I40E_PFHMC_ERRORINFO), + rd32(hw, I40E_PFHMC_ERRORDATA)); + } + + if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { + u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + i40e_ptp_tx_hwtstamp(pf); + } + } + + /* If a critical error is pending we have no choice but to reset the + * device. + * Report and mask out any remaining unexpected interrupts. 
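+ * Critical causes here are PE_CRITERR, PCI_EXCEPTION and ECC_ERR; any of
+ * them requests a PF reset and kicks the service task.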
+ */ + icr0_remaining = icr0 & ena_mask; + if (icr0_remaining) { + dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", + icr0_remaining); + if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || + (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || + (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { + dev_info(&pf->pdev->dev, "device will be reset\n"); + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + ena_mask &= ~icr0_remaining; + } + ret = IRQ_HANDLED; + +enable_intr: + /* re-enable interrupt causes */ + wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); + if (!test_bit(__I40E_DOWN, &pf->state)) { + i40e_service_event_schedule(pf); + i40e_irq_dynamic_enable_icr0(pf); + } + + return ret; +} + +/** + * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. the clean is finished) + **/ +static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + struct i40e_vsi *vsi = tx_ring->vsi; + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_desc; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->cmd_type_offset_bsz & + cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + tx_desc->buffer_addr = 0; + tx_desc->cmd_type_offset_bsz = 0; + /* move past filter desc */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buf->raw_buf); + + tx_buf->raw_buf = NULL; + tx_buf->tx_flags = 0; + tx_buf->next_to_watch = NULL; + dma_unmap_len_set(tx_buf, len, 0); + tx_desc->buffer_addr = 0; + tx_desc->cmd_type_offset_bsz = 0; + + /* move us past the eop_desc for start of next FD desc */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_irq_dynamic_enable(vsi, + tx_ring->q_vector->v_idx + vsi->base_vector); + } + return budget > 0; +} + +/** + * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + struct i40e_vsi *vsi; + + if (!q_vector->tx.ring) + return IRQ_HANDLED; + + vsi = q_vector->tx.ring->vsi; + i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); + + return IRQ_HANDLED; +} + +/** + * i40e_map_vector_to_qp - Assigns the queue pair to the vector + * @vsi: the VSI being configured + * @v_idx: vector index + * @qp_idx: queue pair index + **/ +static void map_vector_to_qp(struct 
i40e_vsi *vsi, int v_idx, int qp_idx) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; + struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +/** + * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors + * @vsi: the VSI being configured + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per queue pair, but on a constrained vector budget, we + * group the queue pairs as "efficiently" as possible. + **/ +static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) +{ + int qp_remaining = vsi->num_queue_pairs; + int q_vectors = vsi->num_q_vectors; + int num_ringpairs; + int v_start = 0; + int qp_idx = 0; + + /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to + * group them so there are multiple queues per vector. + * It is also important to go through all the vectors available to be + * sure that if we don't use all the vectors, that the remaining vectors + * are cleared. This is especially important when decreasing the + * number of queues in use. + */ + for (; v_start < q_vectors; v_start++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + + while (num_ringpairs--) { + map_vector_to_qp(vsi, v_start, qp_idx); + qp_idx++; + qp_remaining--; + } + } +} + +/** + * i40e_vsi_request_irq - Request IRQ from the OS + * @vsi: the VSI being configured + * @basename: name for the vector + **/ +static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) +{ + struct i40e_pf *pf = vsi->back; + int err; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + err = i40e_vsi_request_irq_msix(vsi, basename); + else if (pf->flags & I40E_FLAG_MSI_ENABLED) + err = request_irq(pf->pdev->irq, i40e_intr, 0, + pf->int_name, pf); + else + err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, + pf->int_name, pf); + + if (err) + dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); + + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * i40e_netpoll - A Polling 'interrupt'handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. 
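+ * With MSI-X enabled each queue vector's rings are cleaned directly; otherwise the legacy i40e_intr() handler is invoked.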
+ **/ +#ifdef I40E_FCOE +void i40e_netpoll(struct net_device *netdev) +#else +static void i40e_netpoll(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &vsi->state)) + return; + + pf->flags |= I40E_FLAG_IN_NETPOLL; + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + for (i = 0; i < vsi->num_q_vectors; i++) + i40e_msix_clean_rings(0, vsi->q_vectors[i]); + } else { + i40e_intr(pf->pdev->irq, netdev); + } + pf->flags &= ~I40E_FLAG_IN_NETPOLL; +} +#endif + +/** + * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @enable: enable or disable state of the queue + * + * This routine will wait for the given Tx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. + **/ +static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) +{ + int i; + u32 tx_reg; + + for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { + tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break; + + usleep_range(10, 20); + } + if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * i40e_vsi_control_tx - Start or stop a VSI's rings + * @vsi: the VSI being configured + * @enable: start or stop the rings + **/ +static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int i, j, pf_q, ret = 0; + u32 tx_reg; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + + /* warn the TX unit of coming changes */ + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); + if (!enable) + usleep_range(10, 20); + + for (j = 0; j < 50; j++) { + tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); + if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == + ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } + /* Skip if the queue is already in the requested state */ + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + continue; + + /* turn on/off the queue */ + if (enable) { + wr32(hw, I40E_QTX_HEAD(pf_q), 0); + tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; + } else { + tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } + + wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; + + /* wait for the change to finish */ + ret = i40e_pf_txq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d %sable timeout\n", + __func__, vsi->seid, pf_q, + (enable ? "en" : "dis")); + break; + } + } + + if (hw->revision_id == 0) + mdelay(50); + return ret; +} + +/** + * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @enable: enable or disable state of the queue + * + * This routine will wait for the given Rx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. 
+ **/ +static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) +{ + int i; + u32 rx_reg; + + for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { + rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break; + + usleep_range(10, 20); + } + if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * i40e_vsi_control_rx - Start or stop a VSI's rings + * @vsi: the VSI being configured + * @enable: start or stop the rings + **/ +static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int i, j, pf_q, ret = 0; + u32 rx_reg; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + for (j = 0; j < 50; j++) { + rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); + if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == + ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + continue; + + /* turn on/off the queue */ + if (enable) + rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; + else + rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; + wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); + + /* wait for the change to finish */ + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Rx ring %d %sable timeout\n", + __func__, vsi->seid, pf_q, + (enable ? "en" : "dis")); + break; + } + } + + return ret; +} + +/** + * i40e_vsi_control_rings - Start or stop a VSI's rings + * @vsi: the VSI being configured + * @enable: start or stop the rings + **/ +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) +{ + int ret = 0; + + /* do rx first for enable and last for disable */ + if (request) { + ret = i40e_vsi_control_rx(vsi, request); + if (ret) + return ret; + ret = i40e_vsi_control_tx(vsi, request); + } else { + /* Ignore return value, we need to shutdown whatever we can */ + i40e_vsi_control_tx(vsi, request); + i40e_vsi_control_rx(vsi, request); + } + + return ret; +} + +/** + * i40e_vsi_free_irq - Free the irq association with the OS + * @vsi: the VSI being configured + **/ +static void i40e_vsi_free_irq(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int base = vsi->base_vector; + u32 val, qp; + int i; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (!vsi->q_vectors) + return; + + if (!vsi->irqs_ready) + return; + + vsi->irqs_ready = false; + for (i = 0; i < vsi->num_q_vectors; i++) { + u16 vector = i + base; + + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !vsi->q_vectors[i]->num_ringpairs) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(pf->msix_entries[vector].vector, + NULL); + free_irq(pf->msix_entries[vector].vector, + vsi->q_vectors[i]); + + /* Tear down the interrupt queue link list + * + * We know that they come in pairs and always + * the Rx first, then the Tx. To clear the + * link list, stick the EOL value into the + * next_q field of the registers. 
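+ * The loop below then follows the NEXTQ index from each queue's Tx control register until the end-of-list marker is reached.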
+ */ + val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); + qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) + >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + val |= I40E_QUEUE_END_OF_LIST + << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); + + while (qp != I40E_QUEUE_END_OF_LIST) { + u32 next; + + val = rd32(hw, I40E_QINT_RQCTL(qp)); + + val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | + I40E_QINT_RQCTL_MSIX0_INDX_MASK | + I40E_QINT_RQCTL_CAUSE_ENA_MASK | + I40E_QINT_RQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | + I40E_QINT_RQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_RQCTL(qp), val); + + val = rd32(hw, I40E_QINT_TQCTL(qp)); + + next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) + >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; + + val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | + I40E_QINT_TQCTL_MSIX0_INDX_MASK | + I40E_QINT_TQCTL_CAUSE_ENA_MASK | + I40E_QINT_TQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | + I40E_QINT_TQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_TQCTL(qp), val); + qp = next; + } + } + } else { + free_irq(pf->pdev->irq, pf); + + val = rd32(hw, I40E_PFINT_LNKLST0); + qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) + >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + val |= I40E_QUEUE_END_OF_LIST + << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLST0, val); + + val = rd32(hw, I40E_QINT_RQCTL(qp)); + val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | + I40E_QINT_RQCTL_MSIX0_INDX_MASK | + I40E_QINT_RQCTL_CAUSE_ENA_MASK | + I40E_QINT_RQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | + I40E_QINT_RQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_RQCTL(qp), val); + + val = rd32(hw, I40E_QINT_TQCTL(qp)); + + val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | + I40E_QINT_TQCTL_MSIX0_INDX_MASK | + I40E_QINT_TQCTL_CAUSE_ENA_MASK | + I40E_QINT_TQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | + I40E_QINT_TQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_TQCTL(qp), val); + } +} + +/** + * i40e_free_q_vector - Free memory allocated for specific interrupt vector + * @vsi: the VSI being configured + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct i40e_ring *ring; + + if (!q_vector) + return; + + /* disassociate q_vector from rings */ + i40e_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + + i40e_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI w/ an associated netdev is set up w/ NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + vsi->q_vectors[v_idx] = NULL; + + kfree_rcu(q_vector, rcu); +} + +/** + * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors + * @vsi: the VSI being un-configured + * + * This frees the memory allocated to the q_vectors and + * deletes references to the NAPI struct. 
+ **/ +static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) +{ + int v_idx; + + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + i40e_free_q_vector(vsi, v_idx); +} + +/** + * i40e_reset_interrupt_capability - Disable interrupt setup in OS + * @pf: board private structure + **/ +static void i40e_reset_interrupt_capability(struct i40e_pf *pf) +{ + /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + pci_disable_msix(pf->pdev); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + kfree(pf->irq_pile); + pf->irq_pile = NULL; + } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { + pci_disable_msi(pf->pdev); + } + pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); +} + +/** + * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @pf: board private structure + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) +{ + int i; + + i40e_stop_misc_vector(pf); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } + + i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i]) + i40e_vsi_free_q_vectors(pf->vsi[i]); + i40e_reset_interrupt_capability(pf); +} + +/** + * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI + * @vsi: the VSI being configured + **/ +static void i40e_napi_enable_all(struct i40e_vsi *vsi) +{ + int q_idx; + + if (!vsi->netdev) + return; + + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) + napi_enable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI + * @vsi: the VSI being configured + **/ +static void i40e_napi_disable_all(struct i40e_vsi *vsi) +{ + int q_idx; + + if (!vsi->netdev) + return; + + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) + napi_disable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * i40e_vsi_close - Shut down a VSI + * @vsi: the vsi to be quelled + **/ +static void i40e_vsi_close(struct i40e_vsi *vsi) +{ + if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) + i40e_down(vsi); + i40e_vsi_free_irq(vsi); + i40e_vsi_free_tx_resources(vsi); + i40e_vsi_free_rx_resources(vsi); +} + +/** + * i40e_quiesce_vsi - Pause a given VSI + * @vsi: the VSI being paused + **/ +static void i40e_quiesce_vsi(struct i40e_vsi *vsi) +{ + if (test_bit(__I40E_DOWN, &vsi->state)) + return; + + /* No need to disable FCoE VSI when Tx suspended */ + if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && + vsi->type == I40E_VSI_FCOE) { + dev_dbg(&vsi->back->pdev->dev, + "%s: VSI seid %d skipping FCoE VSI disable\n", + __func__, vsi->seid); + return; + } + + set_bit(__I40E_NEEDS_RESTART, &vsi->state); + if (vsi->netdev && netif_running(vsi->netdev)) { + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + } else { + i40e_vsi_close(vsi); + } +} + +/** + * i40e_unquiesce_vsi - Resume a given VSI + * @vsi: the VSI being resumed + **/ +static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) +{ + if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) + return; + + clear_bit(__I40E_NEEDS_RESTART, &vsi->state); + if (vsi->netdev && netif_running(vsi->netdev)) + vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + else + i40e_vsi_open(vsi); /* this clears the DOWN bit */ +} + +/** + * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF + * @pf: the PF + **/ 
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) +{ + int v; + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) + i40e_quiesce_vsi(pf->vsi[v]); + } +} + +/** + * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF + * @pf: the PF + **/ +static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) +{ + int v; + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) + i40e_unquiesce_vsi(pf->vsi[v]); + } +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * @vsi: the VSI being configured + * + * This function waits for the given VSI's Tx queues to be disabled. + **/ +static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i, pf_q, ret; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_txq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d disable timeout\n", + __func__, vsi->seid, pf_q); + return ret; + } + } + + return 0; +} + +/** + * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * @pf: the PF + * + * This function waits for the Tx queues to be in disabled state for all the + * VSIs that are managed by this PF. + **/ +static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +{ + int v, ret = 0; + + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + /* No need to wait for FCoE VSI queues */ + if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { + ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + if (ret) + break; + } + } + + return ret; +} + +#endif +/** + * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP + * @pf: pointer to PF + * + * Get TC map for ISCSI PF type that will include iSCSI TC + * and LAN TC. 
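+ * TC0 is always included; the iSCSI TC is looked up from the iSCSI APP TLV priority in the local DCBX configuration.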
+ **/ +static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) +{ + struct i40e_dcb_app_priority_table app; + struct i40e_hw *hw = &pf->hw; + u8 enabled_tc = 1; /* TC0 is always enabled */ + u8 tc, i; + /* Get the iSCSI APP TLV */ + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + for (i = 0; i < dcbcfg->numapps; i++) { + app = dcbcfg->app[i]; + if (app.selector == I40E_APP_SEL_TCPIP && + app.protocolid == I40E_APP_PROTOID_ISCSI) { + tc = dcbcfg->etscfg.prioritytable[app.priority]; + enabled_tc |= (1 << tc); + break; + } + } + + return enabled_tc; +} + +/** + * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config + * @dcbcfg: the corresponding DCBx configuration structure + * + * Return the number of TCs from given DCBx configuration + **/ +static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) +{ + u8 num_tc = 0; + int i; + + /* Scan the ETS Config Priority Table to find + * traffic class enabled for a given priority + * and use the traffic class index to get the + * number of traffic classes enabled + */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + if (dcbcfg->etscfg.prioritytable[i] > num_tc) + num_tc = dcbcfg->etscfg.prioritytable[i]; + } + + /* Traffic class index starts from zero so + * increment to return the actual count + */ + return num_tc + 1; +} + +/** + * i40e_dcb_get_enabled_tc - Get enabled traffic classes + * @dcbcfg: the corresponding DCBx configuration structure + * + * Query the current DCB configuration and return the number of + * traffic classes enabled from the given DCBX config + **/ +static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) +{ + u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); + u8 enabled_tc = 1; + u8 i; + + for (i = 0; i < num_tc; i++) + enabled_tc |= 1 << i; + + return enabled_tc; +} + +/** + * i40e_pf_get_num_tc - Get enabled traffic classes for PF + * @pf: PF being queried + * + * Return number of traffic classes enabled for the given PF + **/ +static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u8 i, enabled_tc; + u8 num_tc = 0; + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + /* If DCB is not enabled then always in single TC */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return 1; + + /* SFP mode will be enabled for all TCs on port */ + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + return i40e_dcb_get_num_tc(dcbcfg); + + /* MFP mode return count of enabled TCs for this PF */ + if (pf->hw.func_caps.iscsi) + enabled_tc = i40e_get_iscsi_tc_map(pf); + else + return 1; /* Only TC0 */ + + /* At least have TC0 */ + enabled_tc = (enabled_tc ? enabled_tc : 0x1); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + num_tc++; + } + return num_tc; +} + +/** + * i40e_pf_get_default_tc - Get bitmap for first enabled TC + * @pf: PF being queried + * + * Return a bitmap for first enabled traffic class for this PF. + **/ +static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) +{ + u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; + u8 i = 0; + + if (!enabled_tc) + return 0x1; /* TC0 */ + + /* Find the first enabled TC */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + break; + } + + return 1 << i; +} + +/** + * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes + * @pf: PF being queried + * + * Return a bitmap for enabled traffic classes for this PF. 
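+ * Without DCB only the default TC is returned; in SFP mode all DCBX-enabled TCs are returned; in MFP mode the iSCSI TC map (or the default TC) is used.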
+ **/ +static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) +{ + /* If DCB is not enabled for this PF then just return default TC */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return i40e_pf_get_default_tc(pf); + + /* SFP mode we want PF to be enabled for all TCs */ + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); + + /* MFP enabled and iSCSI PF type */ + if (pf->hw.func_caps.iscsi) + return i40e_get_iscsi_tc_map(pf); + else + return i40e_pf_get_default_tc(pf); +} + +/** + * i40e_vsi_get_bw_info - Query VSI BW Information + * @vsi: the VSI being queried + * + * Returns 0 on success, negative value on failure + **/ +static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) +{ + struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; + struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + i40e_status aq_ret; + u32 tc_bw_max; + int i; + + /* Get the VSI level BW configuration */ + aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi bw config, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + return -EINVAL; + } + + /* Get the VSI level BW configuration per TC */ + aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, + NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + return -EINVAL; + } + + if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { + dev_info(&pf->pdev->dev, + "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", + bw_config.tc_valid_bits, + bw_ets_config.tc_valid_bits); + /* Still continuing */ + } + + vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); + vsi->bw_max_quanta = bw_config.max_bw; + tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | + (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; + vsi->bw_ets_limit_credits[i] = + le16_to_cpu(bw_ets_config.credits[i]); + /* 3 bits out of 4 for each TC */ + vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); + } + + return 0; +} + +/** + * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC + * @vsi: the VSI being configured + * @enabled_tc: TC bitmap + * @bw_credits: BW shared credits per TC + * + * Returns 0 on success, negative value on failure + **/ +static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, + u8 *bw_share) +{ + struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + i40e_status aq_ret; + int i; + + bw_data.tc_valid_bits = enabled_tc; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + bw_data.tc_bw_credits[i] = bw_share[i]; + + aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, + NULL); + if (aq_ret) { + dev_info(&vsi->back->pdev->dev, + "AQ command Config VSI BW allocation per TC failed = %d\n", + vsi->back->hw.aq.asq_last_status); + return -EINVAL; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + vsi->info.qs_handle[i] = bw_data.qs_handles[i]; + + return 0; +} + +/** + * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @enabled_tc: TC map to be enabled + * + **/ +static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) +{ + struct net_device *netdev = vsi->netdev; + struct i40e_pf *pf = vsi->back; + 
struct i40e_hw *hw = &pf->hw;
+	u8 netdev_tc = 0;
+	int i;
+	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+	if (!netdev)
+		return;
+
+	if (!enabled_tc) {
+		netdev_reset_tc(netdev);
+		return;
+	}
+
+	/* Set up actual enabled TCs on the VSI */
+	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+		return;
+
+	/* set per TC queues for the VSI */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		/* Only set TC queues for enabled tcs
+		 *
+		 * e.g. For a VSI that has TC0 and TC3 enabled the
+		 * enabled_tc bitmap would be 0x00001001; the driver
+		 * will set the numtc for netdev as 2 that will be
+		 * referenced by the netdev layer as TC 0 and 1.
+		 */
+		if (vsi->tc_config.enabled_tc & (1 << i))
+			netdev_set_tc_queue(netdev,
+					    vsi->tc_config.tc_info[i].netdev_tc,
+					    vsi->tc_config.tc_info[i].qcount,
+					    vsi->tc_config.tc_info[i].qoffset);
+	}
+
+	/* Assign UP2TC map for the VSI */
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		/* Get the actual TC# for the UP */
+		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+		/* Get the mapped netdev TC# for the UP */
+		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
+		netdev_set_prio_tc_map(netdev, i, netdev_tc);
+	}
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+				      struct i40e_vsi_context *ctxt)
+{
+	/* copy just the sections touched not the entire info
+	 * since not all sections are valid as returned by
+	 * update vsi params
+	 */
+	vsi->info.mapping_flags = ctxt->info.mapping_flags;
+	memcpy(&vsi->info.queue_mapping,
+	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+	       sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
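+ * The flow is: set equal ETS bandwidth shares for the enabled TCs, update the queue mapping through an AQ VSI update, refresh the cached BW info, then update the netdev TC setup.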
+ **/ +static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) +{ + u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; + struct i40e_vsi_context ctxt; + int ret = 0; + int i; + + /* Check if enabled_tc is same as existing or new TCs */ + if (vsi->tc_config.enabled_tc == enabled_tc) + return ret; + + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + bw_share[i] = 1; + } + + ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed configuring TC map %d for VSI %d\n", + enabled_tc, vsi->seid); + goto out; + } + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = vsi->back->hw.pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "update vsi failed, aq_err=%d\n", + vsi->back->hw.aq.asq_last_status); + goto out; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + /* Update current VSI BW information */ + ret = i40e_vsi_get_bw_info(vsi); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed updating vsi bw info, aq_err=%d\n", + vsi->back->hw.aq.asq_last_status); + goto out; + } + + /* Update the netdev TC setup */ + i40e_vsi_config_netdev_tc(vsi, enabled_tc); +out: + return ret; +} + +/** + * i40e_veb_config_tc - Configure TCs for given VEB + * @veb: given VEB + * @enabled_tc: TC bitmap + * + * Configures given TC bitmap for VEB (switching) element + **/ +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; + struct i40e_pf *pf = veb->pf; + int ret = 0; + int i; + + /* No TCs or already enabled TCs just return */ + if (!enabled_tc || veb->enabled_tc == enabled_tc) + return ret; + + bw_data.tc_valid_bits = enabled_tc; + /* bw_data.absolute_credits is not set (relative) */ + + /* Enable ETS TCs with equal BW Share for now */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + bw_data.tc_bw_share_credits[i] = 1; + } + + ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, + &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "veb bw config failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + goto out; + } + + /* Update the BW information */ + ret = i40e_veb_get_bw_info(veb); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed getting veb bw config, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + +out: + return ret; +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs + * @pf: PF struct + * + * Reconfigure VEB/VSIs on a given PF; it is assumed that + * the caller would've quiesce all the VSIs before calling + * this function + **/ +static void i40e_dcb_reconfigure(struct i40e_pf *pf) +{ + u8 tc_map = 0; + int ret; + u8 v; + + /* Enable the TCs available on PF to all VEBs */ + tc_map = i40e_pf_get_tc_map(pf); + for (v = 0; v < I40E_MAX_VEB; v++) { + if (!pf->veb[v]) + continue; + ret = i40e_veb_config_tc(pf->veb[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VEB seid=%d\n", + pf->veb[v]->seid); + /* Will try to configure as many 
components */ + } + } + + /* Update each VSI */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (!pf->vsi[v]) + continue; + + /* - Enable all TCs for the LAN VSI +#ifdef I40E_FCOE + * - For FCoE VSI only enable the TC configured + * as per the APP TLV +#endif + * - For all others keep them at TC0 for now + */ + if (v == pf->lan_vsi) + tc_map = i40e_pf_get_tc_map(pf); + else + tc_map = i40e_pf_get_default_tc(pf); +#ifdef I40E_FCOE + if (pf->vsi[v]->type == I40E_VSI_FCOE) + tc_map = i40e_get_fcoe_tc_map(pf); +#endif /* #ifdef I40E_FCOE */ + + ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VSI seid=%d\n", + pf->vsi[v]->seid); + /* Will try to configure as many components */ + } else { + /* Re-configure VSI vectors based on updated TC map */ + i40e_vsi_map_rings_to_vectors(pf->vsi[v]); + if (pf->vsi[v]->netdev) + i40e_dcbnl_set_all(pf->vsi[v]); + } + } +} + +/** + * i40e_resume_port_tx - Resume port Tx + * @pf: PF struct + * + * Resume a port's Tx and issue a PF reset in case of failure to + * resume. + **/ +static int i40e_resume_port_tx(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_aq_resume_port_tx(hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Resume Port Tx failed = %d\n", + pf->hw.aq.asq_last_status); + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + + return ret; +} + +/** + * i40e_init_pf_dcb - Initialize DCB configuration + * @pf: PF being configured + * + * Query the current DCB configuration and cache it + * in the hardware structure + **/ +static int i40e_init_pf_dcb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int err = 0; + + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) + goto out; + + /* Get the initial DCB configuration */ + err = i40e_init_dcb(hw); + if (!err) { + /* Device/Function is not DCBX capable */ + if ((!hw->func_caps.dcb) || + (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { + dev_info(&pf->pdev->dev, + "DCBX offload is not supported or is disabled for this PF.\n"); + + if (pf->flags & I40E_FLAG_MFP_ENABLED) + goto out; + + } else { + /* When status is not DISABLED then DCBX in FW */ + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; + + pf->flags |= I40E_FLAG_DCB_CAPABLE; + /* Enable DCB tagging only when more than one TC */ + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) + pf->flags |= I40E_FLAG_DCB_ENABLED; + dev_dbg(&pf->pdev->dev, + "DCBX offload is supported for this PF.\n"); + } + } else { + dev_info(&pf->pdev->dev, + "AQ Querying DCB configuration failed: aq_err %d\n", + pf->hw.aq.asq_last_status); + } + +out: + return err; +} +#endif /* CONFIG_I40E_DCB */ +#define SPEED_SIZE 14 +#define FC_SIZE 8 +/** + * i40e_print_link_message - print link up or down + * @vsi: the VSI for which link needs a message + */ +static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) +{ + char speed[SPEED_SIZE] = "Unknown"; + char fc[FC_SIZE] = "RX/TX"; + + if (!isup) { + netdev_info(vsi->netdev, "NIC Link is Down\n"); + return; + } + + /* Warn user if link speed on NPAR enabled partition is not at + * least 10GB + */ + if (vsi->back->hw.func_caps.npar_enable && + (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || + vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) + 
netdev_warn(vsi->netdev, + "The partition detected link speed that is less than 10Gbps\n"); + + switch (vsi->back->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + strlcpy(speed, "40 Gbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_20GB: + strncpy(speed, "20 Gbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_10GB: + strlcpy(speed, "10 Gbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_1GB: + strlcpy(speed, "1000 Mbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_100MB: + strncpy(speed, "100 Mbps", SPEED_SIZE); + break; + default: + break; + } + + switch (vsi->back->hw.fc.current_mode) { + case I40E_FC_FULL: + strlcpy(fc, "RX/TX", FC_SIZE); + break; + case I40E_FC_TX_PAUSE: + strlcpy(fc, "TX", FC_SIZE); + break; + case I40E_FC_RX_PAUSE: + strlcpy(fc, "RX", FC_SIZE); + break; + default: + strlcpy(fc, "None", FC_SIZE); + break; + } + + netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", + speed, fc); +} + +/** + * i40e_up_complete - Finish the last steps of bringing up a connection + * @vsi: the VSI being configured + **/ +static int i40e_up_complete(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int err; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + i40e_vsi_configure_msix(vsi); + else + i40e_configure_msi_and_legacy(vsi); + + /* start rings */ + err = i40e_vsi_control_rings(vsi, true); + if (err) + return err; + + clear_bit(__I40E_DOWN, &vsi->state); + i40e_napi_enable_all(vsi); + i40e_vsi_enable_irq(vsi); + + if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && + (vsi->netdev)) { + i40e_print_link_message(vsi, true); + netif_tx_start_all_queues(vsi->netdev); + netif_carrier_on(vsi->netdev); + } else if (vsi->netdev) { + i40e_print_link_message(vsi, false); + /* need to check for qualified module here*/ + if ((pf->hw.phy.link_info.link_info & + I40E_AQ_MEDIA_AVAILABLE) && + (!(pf->hw.phy.link_info.an_info & + I40E_AQ_QUALIFIED_MODULE))) + netdev_err(vsi->netdev, + "the driver failed to link because an unqualified module was detected."); + } + + /* replay FDIR SB filters */ + if (vsi->type == I40E_VSI_FDIR) { + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = 0; + if (pf->fd_tcp_rule > 0) { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + pf->fd_tcp_rule = 0; + } + i40e_fdir_filter_restore(vsi); + } + i40e_service_event_schedule(pf); + + return 0; +} + +/** + * i40e_vsi_reinit_locked - Reset the VSI + * @vsi: the VSI being configured + * + * Rebuild the ring structs after some configuration + * has changed, e.g. MTU size. + **/ +static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + usleep_range(1000, 2000); + i40e_down(vsi); + + /* Give a VF some time to respond to the reset. The + * two second wait is based upon the watchdog cycle in + * the VF driver. 
+ */ + if (vsi->type == I40E_VSI_SRIOV) + msleep(2000); + i40e_up(vsi); + clear_bit(__I40E_CONFIG_BUSY, &pf->state); +} + +/** + * i40e_up - Bring the connection back up after being down + * @vsi: the VSI being configured + **/ +int i40e_up(struct i40e_vsi *vsi) +{ + int err; + + err = i40e_vsi_configure(vsi); + if (!err) + err = i40e_up_complete(vsi); + + return err; +} + +/** + * i40e_down - Shutdown the connection processing + * @vsi: the VSI being stopped + **/ +void i40e_down(struct i40e_vsi *vsi) +{ + int i; + + /* It is assumed that the caller of this function + * sets the vsi->state __I40E_DOWN bit. + */ + if (vsi->netdev) { + netif_carrier_off(vsi->netdev); + netif_tx_disable(vsi->netdev); + } + i40e_vsi_disable_irq(vsi); + i40e_vsi_control_rings(vsi, false); + i40e_napi_disable_all(vsi); + + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_clean_tx_ring(vsi->tx_rings[i]); + i40e_clean_rx_ring(vsi->rx_rings[i]); + } +} + +/** + * i40e_setup_tc - configure multiple traffic classes + * @netdev: net device to configure + * @tc: number of traffic classes to enable + **/ +#ifdef I40E_FCOE +int i40e_setup_tc(struct net_device *netdev, u8 tc) +#else +static int i40e_setup_tc(struct net_device *netdev, u8 tc) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 enabled_tc = 0; + int ret = -EINVAL; + int i; + + /* Check if DCB enabled to continue */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { + netdev_info(netdev, "DCB is not enabled for adapter\n"); + goto exit; + } + + /* Check if MFP enabled */ + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); + goto exit; + } + + /* Check whether tc count is within enabled limit */ + if (tc > i40e_pf_get_num_tc(pf)) { + netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); + goto exit; + } + + /* Generate TC map for number of tc requested */ + for (i = 0; i < tc; i++) + enabled_tc |= (1 << i); + + /* Requesting same TC configuration as already enabled */ + if (enabled_tc == vsi->tc_config.enabled_tc) + return 0; + + /* Quiesce VSI queues */ + i40e_quiesce_vsi(vsi); + + /* Configure VSI for enabled TCs */ + ret = i40e_vsi_config_tc(vsi, enabled_tc); + if (ret) { + netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", + vsi->seid); + goto exit; + } + + /* Unquiesce VSI */ + i40e_unquiesce_vsi(vsi); + +exit: + return ret; +} + +/** + * i40e_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the netdev watchdog subtask is + * enabled, and the stack is notified that the interface is ready. 
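+ * Open also programs the global TSO flag mask registers and, when built with VXLAN support, requests the known VXLAN UDP ports from the stack.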
+ * + * Returns 0 on success, negative value on failure + **/ +int i40e_open(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int err; + + /* disallow open during test or if eeprom is broken */ + if (test_bit(__I40E_TESTING, &pf->state) || + test_bit(__I40E_BAD_EEPROM, &pf->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + err = i40e_vsi_open(vsi); + if (err) + return err; + + /* configure global TSO hardware offload settings */ + wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN | + TCP_FLAG_CWR) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); + +#ifdef CONFIG_I40E_VXLAN + vxlan_get_rx_port(netdev); +#endif + + return 0; +} + +/** + * i40e_vsi_open - + * @vsi: the VSI to open + * + * Finish initialization of the VSI. + * + * Returns 0 on success, negative value on failure + **/ +int i40e_vsi_open(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + char int_name[I40E_INT_NAME_STR_LEN]; + int err; + + /* allocate descriptors */ + err = i40e_vsi_setup_tx_resources(vsi); + if (err) + goto err_setup_tx; + err = i40e_vsi_setup_rx_resources(vsi); + if (err) + goto err_setup_rx; + + err = i40e_vsi_configure(vsi); + if (err) + goto err_setup_rx; + + if (vsi->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); + err = i40e_vsi_request_irq(vsi, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + } else if (vsi->type == I40E_VSI_FDIR) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + err = i40e_vsi_request_irq(vsi, int_name); + + } else { + err = -EINVAL; + goto err_setup_rx; + } + + err = i40e_up_complete(vsi); + if (err) + goto err_up_complete; + + return 0; + +err_up_complete: + i40e_down(vsi); +err_set_queues: + i40e_vsi_free_irq(vsi); +err_setup_rx: + i40e_vsi_free_rx_resources(vsi); +err_setup_tx: + i40e_vsi_free_tx_resources(vsi); + if (vsi == pf->vsi[pf->lan_vsi]) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + return err; +} + +/** + * i40e_fdir_filter_exit - Cleans up the Flow Director accounting + * @pf: Pointer to PF + * + * This function destroys the hlist where all the Flow Director + * filters were saved. + **/ +static void i40e_fdir_filter_exit(struct i40e_pf *pf) +{ + struct i40e_fdir_filter *filter; + struct hlist_node *node2; + + hlist_for_each_entry_safe(filter, node2, + &pf->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + pf->fdir_pf_active_filters = 0; +} + +/** + * i40e_close - Disables a network interface + * @netdev: network interface device structure + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * this netdev interface is disabled. 
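+ * All teardown is done through i40e_vsi_close(), which releases the VSI's IRQs and ring resources.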
+ * + * Returns 0, this is not allowed to fail + **/ +#ifdef I40E_FCOE +int i40e_close(struct net_device *netdev) +#else +static int i40e_close(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + i40e_vsi_close(vsi); + + return 0; +} + +/** + * i40e_do_reset - Start a PF or Core Reset sequence + * @pf: board private structure + * @reset_flags: which reset is requested + * + * The essential difference in resets is that the PF Reset + * doesn't clear the packet buffers, doesn't reset the PE + * firmware, and doesn't bother the other PFs on the chip. + **/ +void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) +{ + u32 val; + + WARN_ON(in_interrupt()); + + if (i40e_check_asq_alive(&pf->hw)) + i40e_vc_notify_reset(pf); + + /* do the biggest reset indicated */ + if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { + + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); + val = rd32(&pf->hw, I40E_GLGEN_RTRIG); + val |= I40E_GLGEN_RTRIG_GLOBR_MASK; + wr32(&pf->hw, I40E_GLGEN_RTRIG, val); + + } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { + + /* Request a Core Reset + * + * Same as Global Reset, except does *not* include the MAC/PHY + */ + dev_dbg(&pf->pdev->dev, "CoreR requested\n"); + val = rd32(&pf->hw, I40E_GLGEN_RTRIG); + val |= I40E_GLGEN_RTRIG_CORER_MASK; + wr32(&pf->hw, I40E_GLGEN_RTRIG, val); + i40e_flush(&pf->hw); + + } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { + + /* Request a PF Reset + * + * Resets only the PF-specific registers + * + * This goes directly to the tear-down and rebuild of + * the switch, since we need to do all the recovery as + * for the Core Reset. 
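+ * The PF reset itself is driven in software via i40e_handle_reset_warning() rather than through a GLGEN_RTRIG write.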
+ */ + dev_dbg(&pf->pdev->dev, "PFR requested\n"); + i40e_handle_reset_warning(pf); + + } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { + int v; + + /* Find the VSI(s) that requested a re-init */ + dev_info(&pf->pdev->dev, + "VSI reinit requested\n"); + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && + test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { + i40e_vsi_reinit_locked(pf->vsi[v]); + clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); + } + } + + /* no further action needed, so return now */ + return; + } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) { + int v; + + /* Find the VSI(s) that needs to be brought down */ + dev_info(&pf->pdev->dev, "VSI down requested\n"); + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && + test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { + set_bit(__I40E_DOWN, &vsi->state); + i40e_down(vsi); + clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); + } + } + + /* no further action needed, so return now */ + return; + } else { + dev_info(&pf->pdev->dev, + "bad reset request 0x%08x\n", reset_flags); + return; + } +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_need_reconfig - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + **/ +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, + &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prioritytable, + &old_cfg->etscfg.prioritytable, + sizeof(new_cfg->etscfg.prioritytable))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, + &old_cfg->pfc, + sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, + &old_cfg->app, + sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, + need_reconfig); + return need_reconfig; +} + +/** + * i40e_handle_lldp_event - Handle LLDP Change MIB event + * @pf: board private structure + * @e: event info posted on ARQ + **/ +static int i40e_handle_lldp_event(struct i40e_pf *pf, + struct i40e_arq_event_info *e) +{ + struct i40e_aqc_lldp_get_mib *mib = + (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; + struct i40e_hw *hw = &pf->hw; + struct i40e_dcbx_config tmp_dcbx_cfg; + bool need_reconfig = false; + int ret = 0; + u8 type; + + /* Not DCB capable or capability disabled */ + if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + return ret; + + /* Ignore if event is not for Nearest Bridge */ + type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) + & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib bridge type 0x%x\n", __func__, type); + if 
(type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) + return ret; + + /* Check MIB Type and return if event for Remote MIB update */ + type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib type %s\n", __func__, + type ? "remote" : "local"); + if (type == I40E_AQ_LLDP_MIB_REMOTE) { + /* Update the remote cached instance and return */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + goto exit; + } + + /* Store the old configuration */ + tmp_dcbx_cfg = hw->local_dcbx_config; + + /* Reset the old DCBx configuration data */ + memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); + /* Get updated DCBX data from firmware */ + ret = i40e_get_dcb_config(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); + goto exit; + } + + /* No change detected in DCBX configs */ + if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, + sizeof(tmp_dcbx_cfg))) { + dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); + goto exit; + } + + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, + &hw->local_dcbx_config); + + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); + + if (!need_reconfig) + goto exit; + + /* Enable DCB tagging only when more than one TC */ + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) + pf->flags |= I40E_FLAG_DCB_ENABLED; + else + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + + set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* Reconfiguration needed quiesce all VSIs */ + i40e_pf_quiesce_all_vsi(pf); + + /* Changes in configuration update VEB/VSI */ + i40e_dcb_reconfigure(pf); + + ret = i40e_resume_port_tx(pf); + + clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* In case of error no point in resuming VSIs */ + if (ret) + goto exit; + + /* Wait for the PF's Tx queues to be disabled */ + ret = i40e_pf_wait_txq_disabled(pf); + if (ret) { + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } else { + i40e_pf_unquiesce_all_vsi(pf); + } + +exit: + return ret; +} +#endif /* CONFIG_I40E_DCB */ + +/** + * i40e_do_reset_safe - Protected reset path for userland calls. 
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+	rtnl_lock();
+	i40e_do_reset(pf, reset_flags);
+	rtnl_unlock();
+}
+
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+					   struct i40e_arq_event_info *e)
+{
+	struct i40e_aqc_lan_overflow *data =
+		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+	u32 queue = le32_to_cpu(data->prtdcb_rupto);
+	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	u16 vf_id;
+
+	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+		queue, qtx_ctl);
+
+	/* Queue belongs to VF, find the VF and issue VF reset */
+	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
+		vf_id -= hw->func_caps.vf_base_id;
+		vf = &pf->vf[vf_id];
+		i40e_vc_notify_vf_reset(vf);
+		/* Allow VF to process pending reset notification */
+		msleep(20);
+		i40e_reset_vf(vf, false);
+	}
+}
+
+/**
+ * i40e_service_event_complete - Finish up the service event
+ * @pf: board private structure
+ **/
+static void i40e_service_event_complete(struct i40e_pf *pf)
+{
+	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+}
+
+/**
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
+ * @pf: board private structure
+ **/
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+{
+	u32 val, fcnt_prog;
+
+	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+	return fcnt_prog;
+}
+
+/**
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
+ * @pf: board private structure
+ **/
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+	u32 val, fcnt_prog;
+
+	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+	return fcnt_prog;
+}
+
+/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+	u32 val, fcnt_prog;
+
+	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+	return fcnt_prog;
+}
+
+/**
+ * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+	u32 fcnt_prog, fcnt_avail;
+
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+		return;
+
+	/* Check if FD SB or ATR was auto disabled and if there is enough room
+	 * to re-enable
+	 */
+	fcnt_prog = i40e_get_global_fd_count(pf);
+	fcnt_avail = pf->fdir_pf_filter_count;
+	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
+	    (pf->fd_add_err == 0) ||
+	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
+		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + } + } + /* Wait for some more space to be available to turn on ATR */ + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + } + } +} + +#define I40E_MIN_FD_FLUSH_INTERVAL 10 +#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 +/** + * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB + * @pf: board private structure + **/ +static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) +{ + unsigned long min_flush_time; + int flush_wait_retry = 50; + bool disable_atr = false; + int fd_room; + int reg; + + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + + if (time_after(jiffies, pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { + /* If the flush is happening too quick and we have mostly + * SB rules we should not re-enable ATR for some time. + */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + disable_atr = true; + } + + pf->fd_flush_timestamp = jiffies; + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + /* flush all filters */ + wr32(&pf->hw, I40E_PFQF_CTL_1, + I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + i40e_flush(&pf->hw); + pf->fd_flush_cnt++; + pf->fd_add_err = 0; + do { + /* Check FD flush status every 5-6msec */ + usleep_range(5000, 6000); + reg = rd32(&pf->hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { + dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); + } else { + /* replay sideband filters */ + i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); + } + } +} + +/** + * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed + * @pf: board private structure + **/ +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) +{ + return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; +} + +/* We can see up to 256 filter programming desc in transit if the filters are + * being applied really fast; before we see the first + * filter miss error on Rx queue 0. Accumulating enough error messages before + * reacting will make sure we don't cause flush too often. 
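+ * This is the rationale for the I40E_MAX_FD_PROGRAM_ERROR threshold below.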
+ */ +#define I40E_MAX_FD_PROGRAM_ERROR 256 + +/** + * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table + * @pf: board private structure + **/ +static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) +{ + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &pf->state)) + return; + + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + i40e_fdir_flush_and_replay(pf); + + i40e_fdir_check_and_reenable(pf); + +} + +/** + * i40e_vsi_link_event - notify VSI of a link event + * @vsi: vsi to be notified + * @link_up: link up or down + **/ +static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) +{ + if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) + return; + + switch (vsi->type) { + case I40E_VSI_MAIN: +#ifdef I40E_FCOE + case I40E_VSI_FCOE: +#endif + if (!vsi->netdev || !vsi->netdev_registered) + break; + + if (link_up) { + netif_carrier_on(vsi->netdev); + netif_tx_wake_all_queues(vsi->netdev); + } else { + netif_carrier_off(vsi->netdev); + netif_tx_stop_all_queues(vsi->netdev); + } + break; + + case I40E_VSI_SRIOV: + case I40E_VSI_VMDQ2: + case I40E_VSI_CTRL: + case I40E_VSI_MIRROR: + default: + /* there is no notification for other VSIs */ + break; + } +} + +/** + * i40e_veb_link_event - notify elements on the veb of a link event + * @veb: veb to be notified + * @link_up: link up or down + **/ +static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) +{ + struct i40e_pf *pf; + int i; + + if (!veb || !veb->pf) + return; + pf = veb->pf; + + /* depth first... */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) + i40e_veb_link_event(pf->veb[i], link_up); + + /* ... now the local VSIs */ + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) + i40e_vsi_link_event(pf->vsi[i], link_up); +} + +/** + * i40e_link_event - Update netif_carrier status + * @pf: board private structure + **/ +static void i40e_link_event(struct i40e_pf *pf) +{ + bool new_link, old_link; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + u8 new_link_speed, old_link_speed; + + /* set this to force the get_link_status call to refresh state */ + pf->hw.phy.get_link_info = true; + + old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); + new_link = i40e_get_link_status(&pf->hw); + old_link_speed = pf->hw.phy.link_info_old.link_speed; + new_link_speed = pf->hw.phy.link_info.link_speed; + + if (new_link == old_link && + new_link_speed == old_link_speed && + (test_bit(__I40E_DOWN, &vsi->state) || + new_link == netif_carrier_ok(vsi->netdev))) + return; + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40e_print_link_message(vsi, new_link); + + /* Notify the base of the switch tree connected to + * the link. Floating VEBs are not notified. + */ + if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) + i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); + else + i40e_vsi_link_event(vsi, new_link); + + if (pf->vf) + i40e_vc_notify_link_state(pf); + + if (pf->flags & I40E_FLAG_PTP) + i40e_ptp_set_increment(pf); +} + +/** + * i40e_check_hang_subtask - Check for hung queues and dropped interrupts + * @pf: board private structure + * + * Set the per-queue flags to request a check for stuck queues in the irq + * clean functions, then force interrupts to be sure the irq clean is called. 
+ **/ +static void i40e_check_hang_subtask(struct i40e_pf *pf) +{ + int i, v; + + /* If we're down or resetting, just bail */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + /* for each VSI/netdev + * for each Tx queue + * set the check flag + * for each q_vector + * force an interrupt + */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + int armed = 0; + + if (!pf->vsi[v] || + test_bit(__I40E_DOWN, &vsi->state) || + (vsi->netdev && !netif_carrier_ok(vsi->netdev))) + continue; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + set_check_for_tx_hang(vsi->tx_rings[i]); + if (test_bit(__I40E_HANG_CHECK_ARMED, + &vsi->tx_rings[i]->state)) + armed++; + } + + if (armed) { + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, + (I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); + } else { + u16 vec = vsi->base_vector - 1; + u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); + for (i = 0; i < vsi->num_q_vectors; i++, vec++) + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(vec), val); + } + i40e_flush(&vsi->back->hw); + } + } +} + +/** + * i40e_watchdog_subtask - periodic checks not using event driven response + * @pf: board private structure + **/ +static void i40e_watchdog_subtask(struct i40e_pf *pf) +{ + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + /* make sure we don't do these things too often */ + if (time_before(jiffies, (pf->service_timer_previous + + pf->service_timer_period))) + return; + pf->service_timer_previous = jiffies; + + i40e_check_hang_subtask(pf); + i40e_link_event(pf); + + /* Update the stats for active netdevs so the network stack + * can look at updated numbers whenever it cares to + */ + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->netdev) + i40e_update_stats(pf->vsi[i]); + + /* Update the stats for the active switching components */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i]) + i40e_update_veb_stats(pf->veb[i]); + + i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); +} + +/** + * i40e_reset_subtask - Set up for resetting the device and driver + * @pf: board private structure + **/ +static void i40e_reset_subtask(struct i40e_pf *pf) +{ + u32 reset_flags = 0; + + rtnl_lock(); + if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_REINIT_REQUESTED); + clear_bit(__I40E_REINIT_REQUESTED, &pf->state); + } + if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); + clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + } + if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); + clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + } + if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); + clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + } + if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_DOWN_REQUESTED); + clear_bit(__I40E_DOWN_REQUESTED, &pf->state); + } + + /* If there's a recovery already waiting, 
it takes + * precedence before starting a new reset sequence. + */ + if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { + i40e_handle_reset_warning(pf); + goto unlock; + } + + /* If we're already down or resetting, just bail */ + if (reset_flags && + !test_bit(__I40E_DOWN, &pf->state) && + !test_bit(__I40E_CONFIG_BUSY, &pf->state)) + i40e_do_reset(pf, reset_flags); + +unlock: + rtnl_unlock(); +} + +/** + * i40e_handle_link_event - Handle link event + * @pf: board private structure + * @e: event info posted on ARQ + **/ +static void i40e_handle_link_event(struct i40e_pf *pf, + struct i40e_arq_event_info *e) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_aqc_get_link_status *status = + (struct i40e_aqc_get_link_status *)&e->desc.params.raw; + + /* save off old link status information */ + hw->phy.link_info_old = hw->phy.link_info; + + /* Do a new status request to re-enable LSE reporting + * and load new status information into the hw struct + * This completely ignores any state information + * in the ARQ event info, instead choosing to always + * issue the AQ update link status command. + */ + i40e_link_event(pf); + + /* check for unqualified module, if link is down */ + if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && + (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && + (!(status->link_info & I40E_AQ_LINK_UP))) + dev_err(&pf->pdev->dev, + "The driver failed to link because an unqualified module was detected.\n"); +} + +/** + * i40e_clean_adminq_subtask - Clean the AdminQ rings + * @pf: board private structure + **/ +static void i40e_clean_adminq_subtask(struct i40e_pf *pf) +{ + struct i40e_arq_event_info event; + struct i40e_hw *hw = &pf->hw; + u16 pending, i = 0; + i40e_status ret; + u16 opcode; + u32 oldval; + u32 val; + + /* Do not run clean AQ when PF reset fails */ + if (test_bit(__I40E_RESET_FAILED, &pf->state)) + return; + + /* check for error indications */ + val = rd32(&pf->hw, pf->hw.aq.arq.len); + oldval = val; + if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { + dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); + val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; + } + if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { + dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); + val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; + } + if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { + dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); + val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; + } + if (oldval != val) + wr32(&pf->hw, pf->hw.aq.arq.len, val); + + val = rd32(&pf->hw, pf->hw.aq.asq.len); + oldval = val; + if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { + dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); + val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; + } + if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { + dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); + val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; + } + if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { + dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); + val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; + } + if (oldval != val) + wr32(&pf->hw, pf->hw.aq.asq.len, val); + + event.buf_len = I40E_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return; + + do { + ret = i40e_clean_arq_element(hw, &event, &pending); + if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) + break; + else if (ret) { + dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); + break; + } + + opcode = le16_to_cpu(event.desc.opcode); + switch (opcode) { + + case i40e_aqc_opc_get_link_status: + i40e_handle_link_event(pf, &event); + break; + case i40e_aqc_opc_send_msg_to_pf: + ret = 
i40e_vc_process_vf_msg(pf, + le16_to_cpu(event.desc.retval), + le32_to_cpu(event.desc.cookie_high), + le32_to_cpu(event.desc.cookie_low), + event.msg_buf, + event.msg_len); + break; + case i40e_aqc_opc_lldp_update_mib: + dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); +#ifdef CONFIG_I40E_DCB + rtnl_lock(); + ret = i40e_handle_lldp_event(pf, &event); + rtnl_unlock(); +#endif /* CONFIG_I40E_DCB */ + break; + case i40e_aqc_opc_event_lan_overflow: + dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); + i40e_handle_lan_overflow_event(pf, &event); + break; + case i40e_aqc_opc_send_msg_to_peer: + dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); + break; + case i40e_aqc_opc_nvm_erase: + case i40e_aqc_opc_nvm_update: + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + break; + default: + dev_info(&pf->pdev->dev, + "ARQ Error: Unknown event 0x%04x received\n", + opcode); + break; + } + } while (pending && (i++ < pf->adminq_work_limit)); + + clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + /* re-enable Admin queue interrupt cause */ + val = rd32(hw, I40E_PFINT_ICR0_ENA); + val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, val); + i40e_flush(hw); + + kfree(event.msg_buf); +} + +/** + * i40e_verify_eeprom - make sure eeprom is good to use + * @pf: board private structure + **/ +static void i40e_verify_eeprom(struct i40e_pf *pf) +{ + int err; + + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + /* retry in case of garbage read */ + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", + err); + set_bit(__I40E_BAD_EEPROM, &pf->state); + } + } + + if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { + dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); + clear_bit(__I40E_BAD_EEPROM, &pf->state); + } +} + +/** + * i40e_enable_pf_switch_lb + * @pf: pointer to the PF structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the PF structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + 
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_config_bridge_mode - Configure the HW bridge mode + * @veb: pointer to the bridge instance + * + * Configure the loop back mode for the LAN VSI that is downlink to the + * specified HW bridge instance. It is expected this function is called + * when a new HW bridge is instantiated. + **/ +static void i40e_config_bridge_mode(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (veb->bridge_mode & BRIDGE_MODE_VEPA) + i40e_disable_pf_switch_lb(pf); + else + i40e_enable_pf_switch_lb(pf); +} + +/** + * i40e_reconstitute_veb - rebuild the VEB and anything connected to it + * @veb: pointer to the VEB instance + * + * This is a recursive function that first builds the attached VSIs then + * recurses in to build the next layer of VEB. We track the connections + * through our own index numbers because the seid's from the HW could + * change across the reset. + **/ +static int i40e_reconstitute_veb(struct i40e_veb *veb) +{ + struct i40e_vsi *ctl_vsi = NULL; + struct i40e_pf *pf = veb->pf; + int v, veb_idx; + int ret; + + /* build VSI that owns this VEB, temporarily attached to base VEB */ + for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { + if (pf->vsi[v] && + pf->vsi[v]->veb_idx == veb->idx && + pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { + ctl_vsi = pf->vsi[v]; + break; + } + } + if (!ctl_vsi) { + dev_info(&pf->pdev->dev, + "missing owner VSI for veb_idx %d\n", veb->idx); + ret = -ENOENT; + goto end_reconstitute; + } + if (ctl_vsi != pf->vsi[pf->lan_vsi]) + ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; + ret = i40e_add_vsi(ctl_vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "rebuild of owner VSI failed: %d\n", ret); + goto end_reconstitute; + } + i40e_vsi_reset_stats(ctl_vsi); + + /* create the VEB in the switch and move the VSI onto the VEB */ + ret = i40e_add_veb(veb, ctl_vsi); + if (ret) + goto end_reconstitute; + + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; + i40e_config_bridge_mode(veb); + + /* create the remaining VSIs attached to this VEB */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) + continue; + + if (pf->vsi[v]->veb_idx == veb->idx) { + struct i40e_vsi *vsi = pf->vsi[v]; + vsi->uplink_seid = veb->seid; + ret = i40e_add_vsi(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "rebuild of vsi_idx %d failed: %d\n", + v, ret); + goto end_reconstitute; + } + i40e_vsi_reset_stats(vsi); + } + } + + /* create any VEBs attached to this VEB - RECURSION */ + for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { + if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { + pf->veb[veb_idx]->uplink_seid = veb->seid; + ret = i40e_reconstitute_veb(pf->veb[veb_idx]); + if (ret) + break; + } + } + +end_reconstitute: + return ret; +} + +/** + * i40e_get_capabilities - get info about the HW + * @pf: the PF struct + **/ +static int i40e_get_capabilities(struct i40e_pf *pf) +{ + struct i40e_aqc_list_capabilities_element_resp *cap_buf; + u16 data_size; + int buf_len; + 
int err; + + buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); + do { + cap_buf = kzalloc(buf_len, GFP_KERNEL); + if (!cap_buf) + return -ENOMEM; + + /* this loads the data into the hw struct for us */ + err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, + &data_size, + i40e_aqc_opc_list_func_capabilities, + NULL); + /* data loaded, buffer no longer needed */ + kfree(cap_buf); + + if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { + /* retry with a larger buffer */ + buf_len = data_size; + } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { + dev_info(&pf->pdev->dev, + "capability discovery failed: aq=%d\n", + pf->hw.aq.asq_last_status); + return -ENODEV; + } + } while (err); + + if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || + (pf->hw.aq.fw_maj_ver < 2)) { + pf->hw.func_caps.num_msix_vectors++; + pf->hw.func_caps.num_msix_vectors_vf++; + } + + if (pf->hw.debug_mask & I40E_DEBUG_USER) + dev_info(&pf->pdev->dev, + "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", + pf->hw.pf_id, pf->hw.func_caps.num_vfs, + pf->hw.func_caps.num_msix_vectors, + pf->hw.func_caps.num_msix_vectors_vf, + pf->hw.func_caps.fd_filters_guaranteed, + pf->hw.func_caps.fd_filters_best_effort, + pf->hw.func_caps.num_tx_qp, + pf->hw.func_caps.num_vsis); + +#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + + pf->hw.func_caps.num_vfs) + if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { + dev_info(&pf->pdev->dev, + "got num_vsis %d, setting num_vsis to %d\n", + pf->hw.func_caps.num_vsis, DEF_NUM_VSI); + pf->hw.func_caps.num_vsis = DEF_NUM_VSI; + } + + return 0; +} + +static int i40e_vsi_clear(struct i40e_vsi *vsi); + +/** + * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband + * @pf: board private structure + **/ +static void i40e_fdir_sb_setup(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi; + int i; + + /* quick workaround for an NVM issue that leaves a critical register + * uninitialized + */ + if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { + static const u32 hkey[] = { + 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, + 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, + 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, + 0x95b3a76d}; + + for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) + wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); + } + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + + /* find existing VSI and see if it needs configuring */ + vsi = NULL; + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + vsi = pf->vsi[i]; + break; + } + } + + /* create a new VSI if none exists */ + if (!vsi) { + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, + pf->vsi[pf->lan_vsi]->seid, 0); + if (!vsi) { + dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + return; + } + } + + i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); +} + +/** + * i40e_fdir_teardown - release the Flow Director resources + * @pf: board private structure + **/ +static void i40e_fdir_teardown(struct i40e_pf *pf) +{ + int i; + + i40e_fdir_filter_exit(pf); + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + i40e_vsi_release(pf->vsi[i]); + break; + } + } +} + +/** + * i40e_prep_for_reset - prep for the core to reset + * @pf: board private structure + * + * Close up the VFs and other things in prep for PF Reset. 
+ **/ +static void i40e_prep_for_reset(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret = 0; + u32 v; + + clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); + if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + return; + + dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + + /* quiesce the VSIs and their queues that are not already DOWN */ + i40e_pf_quiesce_all_vsi(pf); + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) + pf->vsi[v]->seid = 0; + } + + i40e_shutdown_adminq(&pf->hw); + + /* call shutdown HMC */ + if (hw->hmc.hmc_obj) { + ret = i40e_shutdown_lan_hmc(hw); + if (ret) + dev_warn(&pf->pdev->dev, + "shutdown_lan_hmc failed: %d\n", ret); + } +} + +/** + * i40e_send_version - update firmware with driver version + * @pf: PF struct + */ +static void i40e_send_version(struct i40e_pf *pf) +{ + struct i40e_driver_version dv; + + dv.major_version = DRV_VERSION_MAJOR; + dv.minor_version = DRV_VERSION_MINOR; + dv.build_version = DRV_VERSION_BUILD; + dv.subbuild_version = 0; + strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); + i40e_aq_send_driver_version(&pf->hw, &dv, NULL); +} + +/** + * i40e_reset_and_rebuild - reset and rebuild using a saved config + * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. + **/ +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) +{ + struct i40e_hw *hw = &pf->hw; + u8 set_fc_aq_fail = 0; + i40e_status ret; + u32 v; + + /* Now we wait for GRST to settle out. + * We don't have to delete the VEBs or VSIs from the hw switch + * because the reset will make them disappear. + */ + ret = i40e_pf_reset(hw); + if (ret) { + dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); + set_bit(__I40E_RESET_FAILED, &pf->state); + goto clear_recovery; + } + pf->pfr_count++; + + if (test_bit(__I40E_DOWN, &pf->state)) + goto clear_recovery; + dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); + + /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ + ret = i40e_init_adminq(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); + goto clear_recovery; + } + + /* re-verify the eeprom if we just had an EMP reset */ + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) + i40e_verify_eeprom(pf); + + i40e_clear_pxe_mode(hw); + ret = i40e_get_capabilities(pf); + if (ret) { + dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", + ret); + goto end_core_reset; + } + + ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, + pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + if (ret) { + dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); + goto end_core_reset; + } + ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (ret) { + dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); + goto end_core_reset; + } + +#ifdef CONFIG_I40E_DCB + ret = i40e_init_pf_dcb(pf); + if (ret) { + dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + /* Continue without DCB enabled */ + } +#endif /* CONFIG_I40E_DCB */ +#ifdef I40E_FCOE + ret = i40e_init_pf_fcoe(pf); + if (ret) + dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret); + +#endif + /* do basic switch setup */ + ret = i40e_setup_pf_switch(pf, reinit); + if (ret) + goto end_core_reset; + + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + ret = 
i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (ret) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + + /* make sure our flow control settings are restored */ + ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); + if (ret) + dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + + /* Rebuild the VSIs and VEBs that existed before reset. + * They are still in our local switch element arrays, so only + * need to rebuild the switch model in the HW. + * + * If there were VEBs but the reconstitution failed, we'll try + * try to recover minimal use by getting the basic PF VSI working. + */ + if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { + dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); + /* find the one VEB connected to the MAC, and find orphans */ + for (v = 0; v < I40E_MAX_VEB; v++) { + if (!pf->veb[v]) + continue; + + if (pf->veb[v]->uplink_seid == pf->mac_seid || + pf->veb[v]->uplink_seid == 0) { + ret = i40e_reconstitute_veb(pf->veb[v]); + + if (!ret) + continue; + + /* If Main VEB failed, we're in deep doodoo, + * so give up rebuilding the switch and set up + * for minimal rebuild of PF VSI. + * If orphan failed, we'll report the error + * but try to keep going. + */ + if (pf->veb[v]->uplink_seid == pf->mac_seid) { + dev_info(&pf->pdev->dev, + "rebuild of switch failed: %d, will try to set up simple PF connection\n", + ret); + pf->vsi[pf->lan_vsi]->uplink_seid + = pf->mac_seid; + break; + } else if (pf->veb[v]->uplink_seid == 0) { + dev_info(&pf->pdev->dev, + "rebuild of orphan VEB failed: %d\n", + ret); + } + } + } + } + + if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { + dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); + /* no VEB, so rebuild only the Main VSI */ + ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); + if (ret) { + dev_info(&pf->pdev->dev, + "rebuild of Main VSI failed: %d\n", ret); + goto end_core_reset; + } + } + + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* reinit the misc interrupt */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + ret = i40e_setup_misc_vector(pf); + + /* restart the VSIs that were rebuilt and running before the reset */ + i40e_pf_unquiesce_all_vsi(pf); + + if (pf->num_alloc_vfs) { + for (v = 0; v < pf->num_alloc_vfs; v++) + i40e_reset_vf(&pf->vf[v], true); + } + + /* tell the firmware that we're starting */ + i40e_send_version(pf); + +end_core_reset: + clear_bit(__I40E_RESET_FAILED, &pf->state); +clear_recovery: + clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); +} + +/** + * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild + * @pf: board private structure + * + * Close up the VFs and other things in prep for a Core Reset, + * then get ready to rebuild the world. 
+ **/ +static void i40e_handle_reset_warning(struct i40e_pf *pf) +{ + i40e_prep_for_reset(pf); + i40e_reset_and_rebuild(pf, false); +} + +/** + * i40e_handle_mdd_event + * @pf: pointer to the PF structure + * + * Called from the MDD irq handler to identify possibly malicious vfs + **/ +static void i40e_handle_mdd_event(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + bool mdd_detected = false; + bool pf_mdd_detected = false; + struct i40e_vf *vf; + u32 reg; + int i; + + if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, I40E_GL_MDET_TX); + if (reg & I40E_GL_MDET_TX_VALID_MASK) { + u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> + I40E_GL_MDET_TX_PF_NUM_SHIFT; + u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + I40E_GL_MDET_TX_VF_NUM_SHIFT; + u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> + I40E_GL_MDET_TX_EVENT_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", + event, queue, pf_num, vf_num); + wr32(hw, I40E_GL_MDET_TX, 0xffffffff); + mdd_detected = true; + } + reg = rd32(hw, I40E_GL_MDET_RX); + if (reg & I40E_GL_MDET_RX_VALID_MASK) { + u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> + I40E_GL_MDET_RX_FUNCTION_SHIFT; + u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> + I40E_GL_MDET_RX_EVENT_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", + event, queue, func); + wr32(hw, I40E_GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + reg = rd32(hw, I40E_PF_MDET_TX); + if (reg & I40E_PF_MDET_TX_VALID_MASK) { + wr32(hw, I40E_PF_MDET_TX, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + reg = rd32(hw, I40E_PF_MDET_RX); + if (reg & I40E_PF_MDET_RX_VALID_MASK) { + wr32(hw, I40E_PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF, initiate a reset */ + if (pf_mdd_detected) { + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + } + + /* see if one of the VFs needs its hand slapped */ + for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + vf = &(pf->vf[i]); + reg = rd32(hw, I40E_VP_MDET_TX(i)); + if (reg & I40E_VP_MDET_TX_VALID_MASK) { + wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, I40E_VP_MDET_RX(i)); + if (reg & I40E_VP_MDET_RX_VALID_MASK) { + wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); + } + + if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { + dev_info(&pf->pdev->dev, + "Too many MDD events on VF %d, disabled\n", i); + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF\n"); + set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } + } + + /* re-enable mdd interrupt cause */ + clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; + wr32(hw, 
I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); +} + +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * @pf: board private structure + **/ +static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + __be16 port; + int i; + + if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) + return; + + pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->pending_vxlan_bitmap & (1 << i)) { + pf->pending_vxlan_bitmap &= ~(1 << i); + port = pf->vxlan_ports[i]; + if (port) + ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), + I40E_AQC_TUNNEL_TYPE_VXLAN, + NULL, NULL); + else + ret = i40e_aq_del_udp_tunnel(hw, i, NULL); + + if (ret) { + dev_info(&pf->pdev->dev, + "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + port ? "add" : "delete", + ntohs(port), i, ret, + pf->hw.aq.asq_last_status); + pf->vxlan_ports[i] = 0; + } + } + } +} + +#endif +/** + * i40e_service_task - Run the driver's async subtasks + * @work: pointer to work_struct containing our data + **/ +static void i40e_service_task(struct work_struct *work) +{ + struct i40e_pf *pf = container_of(work, + struct i40e_pf, + service_task); + unsigned long start_time = jiffies; + + /* don't bother with service tasks if a reset is in progress */ + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + i40e_service_event_complete(pf); + return; + } + + i40e_reset_subtask(pf); + i40e_handle_mdd_event(pf); + i40e_vc_process_vflr_event(pf); + i40e_watchdog_subtask(pf); + i40e_fdir_reinit_subtask(pf); + i40e_sync_filters_subtask(pf); +#ifdef CONFIG_I40E_VXLAN + i40e_sync_vxlan_filters_subtask(pf); +#endif + i40e_clean_adminq_subtask(pf); + + i40e_service_event_complete(pf); + + /* If the tasks have taken longer than one timer cycle or there + * is more work to be done, reschedule the service task now + * rather than wait for the timer to tick again. 
+ */ + if (time_after(jiffies, (start_time + pf->service_timer_period)) || + test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || + test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || + test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + i40e_service_event_schedule(pf); +} + +/** + * i40e_service_timer - timer callback + * @data: pointer to PF struct + **/ +static void i40e_service_timer(unsigned long data) +{ + struct i40e_pf *pf = (struct i40e_pf *)data; + + mod_timer(&pf->service_timer, + round_jiffies(jiffies + pf->service_timer_period)); + i40e_service_event_schedule(pf); +} + +/** + * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI + * @vsi: the VSI being configured + **/ +static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + switch (vsi->type) { + case I40E_VSI_MAIN: + vsi->alloc_queue_pairs = pf->num_lan_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + vsi->num_q_vectors = pf->num_lan_msix; + else + vsi->num_q_vectors = 1; + + break; + + case I40E_VSI_FDIR: + vsi->alloc_queue_pairs = 1; + vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_q_vectors = 1; + break; + + case I40E_VSI_VMDQ2: + vsi->alloc_queue_pairs = pf->num_vmdq_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_q_vectors = pf->num_vmdq_msix; + break; + + case I40E_VSI_SRIOV: + vsi->alloc_queue_pairs = pf->num_vf_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + break; + +#ifdef I40E_FCOE + case I40E_VSI_FCOE: + vsi->alloc_queue_pairs = pf->num_fcoe_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_q_vectors = pf->num_fcoe_msix; + break; + +#endif /* I40E_FCOE */ + default: + WARN_ON(1); + return -ENODATA; + } + + return 0; +} + +/** + * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi + * @type: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. + * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) +{ + int size; + int ret = 0; + + /* allocate memory for both Tx and Rx ring pointers */ + size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; + vsi->tx_rings = kzalloc(size, GFP_KERNEL); + if (!vsi->tx_rings) + return -ENOMEM; + vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; + + if (alloc_qvectors) { + /* allocate memory for q_vector pointers */ + size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; + vsi->q_vectors = kzalloc(size, GFP_KERNEL); + if (!vsi->q_vectors) { + ret = -ENOMEM; + goto err_vectors; + } + } + return ret; + +err_vectors: + kfree(vsi->tx_rings); + return ret; +} + +/** + * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF + * @pf: board private structure + * @type: type of VSI + * + * On error: returns error code (negative) + * On success: returns vsi index in PF (positive) + **/ +static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) +{ + int ret = -ENODEV; + struct i40e_vsi *vsi; + int vsi_idx; + int i; + + /* Need to protect the allocation of the VSIs at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* VSI list may be fragmented if VSI creation/destruction has + * been happening. 
We can afford to do a quick scan to look + * for any free VSIs in the list. + * + * find next empty vsi slot, looping back around if necessary + */ + i = pf->next_vsi; + while (i < pf->num_alloc_vsi && pf->vsi[i]) + i++; + if (i >= pf->num_alloc_vsi) { + i = 0; + while (i < pf->next_vsi && pf->vsi[i]) + i++; + } + + if (i < pf->num_alloc_vsi && !pf->vsi[i]) { + vsi_idx = i; /* Found one! */ + } else { + ret = -ENODEV; + goto unlock_pf; /* out of VSI slots! */ + } + pf->next_vsi = ++i; + + vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); + if (!vsi) { + ret = -ENOMEM; + goto unlock_pf; + } + vsi->type = type; + vsi->back = pf; + set_bit(__I40E_DOWN, &vsi->state); + vsi->flags = 0; + vsi->idx = vsi_idx; + vsi->rx_itr_setting = pf->rx_itr_default; + vsi->tx_itr_setting = pf->tx_itr_default; + vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? + pf->rss_table_size : 64; + vsi->netdev_registered = false; + vsi->work_limit = I40E_DEFAULT_IRQ_WORK; + INIT_LIST_HEAD(&vsi->mac_filter_list); + vsi->irqs_ready = false; + + ret = i40e_set_num_rings_in_vsi(vsi); + if (ret) + goto err_rings; + + ret = i40e_vsi_alloc_arrays(vsi, true); + if (ret) + goto err_rings; + + /* Setup default MSIX irq handler for VSI */ + i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); + + pf->vsi[vsi_idx] = vsi; + ret = vsi_idx; + goto unlock_pf; + +err_rings: + pf->next_vsi = i - 1; + kfree(vsi); +unlock_pf: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +/** + * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI + * @type: VSI pointer + * @free_qvectors: a bool to specify if q_vectors need to be freed. + * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(vsi->q_vectors); + vsi->q_vectors = NULL; + } + kfree(vsi->tx_rings); + vsi->tx_rings = NULL; + vsi->rx_rings = NULL; +} + +/** + * i40e_vsi_clear - Deallocate the VSI provided + * @vsi: the VSI being un-configured + **/ +static int i40e_vsi_clear(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf; + + if (!vsi) + return 0; + + if (!vsi->back) + goto free_vsi; + pf = vsi->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->vsi[vsi->idx]) { + dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", + vsi->idx, vsi->idx, vsi, vsi->type); + goto unlock_vsi; + } + + if (pf->vsi[vsi->idx] != vsi) { + dev_err(&pf->pdev->dev, + "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", + pf->vsi[vsi->idx]->idx, + pf->vsi[vsi->idx], + pf->vsi[vsi->idx]->type, + vsi->idx, vsi, vsi->type); + goto unlock_vsi; + } + + /* updates the PF for this cleared vsi */ + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); + i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); + + i40e_vsi_free_arrays(vsi, true); + + pf->vsi[vsi->idx] = NULL; + if (vsi->idx < pf->next_vsi) + pf->next_vsi = vsi->idx; + +unlock_vsi: + mutex_unlock(&pf->switch_mutex); +free_vsi: + kfree(vsi); + + return 0; +} + +/** + * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI + * @vsi: the VSI being cleaned + **/ +static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) +{ + int i; + + if (vsi->tx_rings && vsi->tx_rings[0]) { + for (i = 0; i < vsi->alloc_queue_pairs; i++) { + kfree_rcu(vsi->tx_rings[i], rcu); + vsi->tx_rings[i] = NULL; + vsi->rx_rings[i] = NULL; + } + } +} + +/** + * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI + * @vsi: the 
VSI being configured + **/ +static int i40e_alloc_rings(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring, *rx_ring; + struct i40e_pf *pf = vsi->back; + int i; + + /* Set basic values in the rings to be used later during open() */ + for (i = 0; i < vsi->alloc_queue_pairs; i++) { + /* allocate space for both Tx and Rx in one shot */ + tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + if (!tx_ring) + goto err_out; + + tx_ring->queue_index = i; + tx_ring->reg_idx = vsi->base_queue + i; + tx_ring->ring_active = false; + tx_ring->vsi = vsi; + tx_ring->netdev = vsi->netdev; + tx_ring->dev = &pf->pdev->dev; + tx_ring->count = vsi->num_desc; + tx_ring->size = 0; + tx_ring->dcb_tc = 0; + vsi->tx_rings[i] = tx_ring; + + rx_ring = &tx_ring[1]; + rx_ring->queue_index = i; + rx_ring->reg_idx = vsi->base_queue + i; + rx_ring->ring_active = false; + rx_ring->vsi = vsi; + rx_ring->netdev = vsi->netdev; + rx_ring->dev = &pf->pdev->dev; + rx_ring->count = vsi->num_desc; + rx_ring->size = 0; + rx_ring->dcb_tc = 0; + if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) + set_ring_16byte_desc_enabled(rx_ring); + else + clear_ring_16byte_desc_enabled(rx_ring); + vsi->rx_rings[i] = rx_ring; + } + + return 0; + +err_out: + i40e_vsi_clear_rings(vsi); + return -ENOMEM; +} + +/** + * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel + * @pf: board private structure + * @vectors: the number of MSI-X vectors to request + * + * Returns the number of vectors reserved, or error + **/ +static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) +{ + vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, + I40E_MIN_MSIX, vectors); + if (vectors < 0) { + dev_info(&pf->pdev->dev, + "MSI-X vector reservation failed: %d\n", vectors); + vectors = 0; + } + + return vectors; +} + +/** + * i40e_init_msix - Setup the MSIX capability + * @pf: board private structure + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns the number of vectors reserved or negative on failure + **/ +static int i40e_init_msix(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int vectors_left; + int v_budget, i; + int v_actual; + + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + return -ENODEV; + + /* The number of vectors we'll request will be comprised of: + * - Add 1 for "other" cause for Admin Queue events, etc. + * - The number of LAN queue pairs + * - Queues being used for RSS. + * We don't need as many as max_rss_size vectors. + * use rss_size instead in the calculation since that + * is governed by number of cpus in the system. + * - assumes symmetric Tx/Rx pairing + * - The number of VMDq pairs +#ifdef I40E_FCOE + * - The number of FCOE qps. +#endif + * Once we count this up, try the request. + * + * If we can't get what we want, we'll simplify to nearly nothing + * and try again. If that still fails, we punt. + */ + vectors_left = hw->func_caps.num_msix_vectors; + v_budget = 0; + + /* reserve one vector for miscellaneous handler */ + if (vectors_left) { + v_budget++; + vectors_left--; + } + + /* reserve vectors for the main PF traffic queues */ + pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); + vectors_left -= pf->num_lan_msix; + v_budget += pf->num_lan_msix; + + /* reserve one vector for sideband flow director */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (vectors_left) { + v_budget++; + vectors_left--; + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + } + } + +#ifdef I40E_FCOE + /* can we reserve enough for FCoE? 
*/ + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + if (!vectors_left) + pf->num_fcoe_msix = 0; + else if (vectors_left >= pf->num_fcoe_qps) + pf->num_fcoe_msix = pf->num_fcoe_qps; + else + pf->num_fcoe_msix = 1; + v_budget += pf->num_fcoe_msix; + vectors_left -= pf->num_fcoe_msix; + } + +#endif + /* any vectors left over go for VMDq support */ + if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); + + /* if we're short on vectors for what's desired, we limit + * the queues per vmdq. If this is still more than are + * available, the user will need to change the number of + * queues/vectors used by the PF later with the ethtool + * channels command + */ + if (vmdq_vecs < vmdq_vecs_wanted) + pf->num_vmdq_qps = 1; + pf->num_vmdq_msix = pf->num_vmdq_qps; + + v_budget += vmdq_vecs; + vectors_left -= vmdq_vecs; + } + + pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), + GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < v_budget; i++) + pf->msix_entries[i].entry = i; + v_actual = i40e_reserve_msix_vectors(pf, v_budget); + + if (v_actual != v_budget) { + /* If we have limited resources, we will start with no vectors + * for the special features and then allocate vectors to some + * of these features based on the policy and at the end disable + * the features that did not get any vectors. + */ +#ifdef I40E_FCOE + pf->num_fcoe_qps = 0; + pf->num_fcoe_msix = 0; +#endif + pf->num_vmdq_msix = 0; + } + + if (v_actual < I40E_MIN_MSIX) { + pf->flags &= ~I40E_FLAG_MSIX_ENABLED; + kfree(pf->msix_entries); + pf->msix_entries = NULL; + return -ENODEV; + + } else if (v_actual == I40E_MIN_MSIX) { + /* Adjust for minimal MSIX use */ + pf->num_vmdq_vsis = 0; + pf->num_vmdq_qps = 0; + pf->num_lan_qps = 1; + pf->num_lan_msix = 1; + + } else if (v_actual != v_budget) { + int vec; + + /* reserve the misc vector */ + vec = v_actual - 1; + + /* Scale vector usage down */ + pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ + pf->num_vmdq_vsis = 1; + pf->num_vmdq_qps = 1; + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + + /* partition out the remaining vectors */ + switch (vec) { + case 2: + pf->num_lan_msix = 1; + break; + case 3: +#ifdef I40E_FCOE + /* give one vector to FCoE */ + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + pf->num_lan_msix = 1; + pf->num_fcoe_msix = 1; + } +#else + pf->num_lan_msix = 2; +#endif + break; + default: +#ifdef I40E_FCOE + /* give one vector to FCoE */ + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + pf->num_fcoe_msix = 1; + vec--; + } +#endif + /* give the rest to the PF */ + pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); + break; + } + } + + if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + (pf->num_vmdq_msix == 0)) { + dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); + pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; + } +#ifdef I40E_FCOE + + if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { + dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); + pf->flags &= ~I40E_FLAG_FCOE_ENABLED; + } +#endif + return v_actual; +} + +/** + * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the vsi struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
+ **/ +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +{ + struct i40e_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, + i40e_napi_poll, NAPI_POLL_WEIGHT); + + q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->tx.latency_range = I40E_LOW_LATENCY; + + /* tie q_vector and vsi together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** + * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors + * @vsi: the VSI being configured + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int v_idx, num_q_vectors; + int err; + + /* if not MSIX, give the one vector only to the LAN VSI */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + num_q_vectors = vsi->num_q_vectors; + else if (vsi == pf->vsi[pf->lan_vsi]) + num_q_vectors = 1; + else + return -EINVAL; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = i40e_vsi_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + i40e_free_q_vector(vsi, v_idx); + + return err; +} + +/** + * i40e_init_interrupt_scheme - Determine proper interrupt scheme + * @pf: board private structure to initialize + **/ +static int i40e_init_interrupt_scheme(struct i40e_pf *pf) +{ + int vectors = 0; + ssize_t size; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + vectors = i40e_init_msix(pf); + if (vectors < 0) { + pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | +#ifdef I40E_FCOE + I40E_FLAG_FCOE_ENABLED | +#endif + I40E_FLAG_RSS_ENABLED | + I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_SRIOV_ENABLED | + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + + /* rework the queue expectations without MSIX */ + i40e_determine_queue_usage(pf); + } + } + + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && + (pf->flags & I40E_FLAG_MSI_ENABLED)) { + dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); + vectors = pci_enable_msi(pf->pdev); + if (vectors < 0) { + dev_info(&pf->pdev->dev, "MSI init failed - %d\n", + vectors); + pf->flags &= ~I40E_FLAG_MSI_ENABLED; + } + vectors = 1; /* one MSI or Legacy vector */ + } + + if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) + dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); + + /* set up vector assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = vectors; + pf->irq_pile->search_hint = 0; + + /* track first vector for misc interrupts, ignore return */ + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); + + return 0; +} + +/** + * i40e_setup_misc_vector - Setup the misc vector to handle non queue events + * @pf: board private structure + * + * This sets up the handler for MSIX 0, which is used to manage the + * non-queue interrupts, e.g. AdminQ and errors. This is not used + * when in MSI or Legacy interrupt mode. 
+ **/ +static int i40e_setup_misc_vector(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int err = 0; + + /* Only request the irq if this is the first time through, and + * not when we're rebuilding after a Reset + */ + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + err = request_irq(pf->msix_entries[0].vector, + i40e_intr, 0, pf->int_name, pf); + if (err) { + dev_info(&pf->pdev->dev, + "request_irq for %s failed: %d\n", + pf->int_name, err); + return -EFAULT; + } + } + + i40e_enable_misc_int_causes(pf); + + /* associate no queues to the misc vector */ + wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); + + i40e_flush(hw); + + i40e_irq_dynamic_enable_icr0(pf); + + return err; +} + +/** + * i40e_config_rss - Prepare for RSS if used + * @pf: board private structure + **/ +static int i40e_config_rss(struct i40e_pf *pf) +{ + u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_hw *hw = &pf->hw; + u32 lut = 0; + int i, j; + u64 hena; + u32 reg_val; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); + + /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ + hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | + ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + hena |= I40E_DEFAULT_RSS_HENA; + wr32(hw, I40E_PFQF_HENA(0), (u32)hena); + wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + + /* Check capability and Set table size and register per hw expectation*/ + reg_val = rd32(hw, I40E_PFQF_CTL_0); + if (pf->rss_table_size == 512) + reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; + else + reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; + wr32(hw, I40E_PFQF_CTL_0, reg_val); + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { + + /* The assumption is that lan qp count will be the highest + * qp count for any PF VSI that needs RSS. + * If multiple VSIs need RSS support, all the qp counts + * for those VSIs should be a power of 2 for RSS to work. + * If LAN VSI is the only consumer for RSS then this requirement + * is not necessary. + */ + if (j == vsi->rss_size) + j = 0; + /* lut = 4-byte sliding window of 4 lut entries */ + lut = (lut << 8) | (j & + ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); + /* On i = 3, we have 4 entries in lut; write to the register */ + if ((i & 3) == 3) + wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + i40e_flush(hw); + + return 0; +} + +/** + * i40e_reconfig_rss_queues - change number of queues for rss and rebuild + * @pf: board private structure + * @queue_count: the requested queue count for rss. + * + * returns 0 if rss is not enabled, if enabled returns the final rss queue + * count which may be different from the requested queue count. 
+ **/ +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + int new_rss_size; + + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) + return 0; + + new_rss_size = min_t(int, queue_count, pf->rss_size_max); + + if (queue_count != vsi->num_queue_pairs) { + vsi->req_queue_pairs = queue_count; + i40e_prep_for_reset(pf); + + pf->rss_size = new_rss_size; + + i40e_reset_and_rebuild(pf, true); + i40e_config_rss(pf); + } + dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); + return pf->rss_size; +} + +/** + * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) +{ + i40e_status status; + bool min_valid, max_valid; + u32 max_bw, min_bw; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (!status) { + if (min_valid) + pf->npar_min_bw = min_bw; + if (max_valid) + pf->npar_max_bw = max_bw; + } + + return status; +} + +/** + * i40e_set_npar_bw_setting - Set BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) +{ + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + + /* Set the valid bit for this PF */ + bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; + bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + + return status; +} + +/** + * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) +{ + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status ret; + u16 nvm_word; + + if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on partition 1! This is partition %d", + pf->hw.partition_id); + ret = I40E_NOT_SUPPORTED; + goto bw_commit_out; + } + + /* Acquire NVM for read access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for read access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + ret = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Wait a bit for NVM release to complete */ + msleep(50); + + /* Acquire NVM for write access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for write access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the bandwidth values permanently. 
+ */ + ret = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED, err %d aq_err %d\n", + ret, last_aq_status); +bw_commit_out: + + return ret; +} + +/** + * i40e_sw_init - Initialize general software structures (struct i40e_pf) + * @pf: board private structure to initialize + * + * i40e_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int i40e_sw_init(struct i40e_pf *pf) +{ + int err = 0; + int size; + + pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, + (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); + pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; + if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { + if (I40E_DEBUG_USER & debug) + pf->hw.debug_mask = debug; + pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), + I40E_DEFAULT_MSG_ENABLE); + } + + /* Set default capability flags */ + pf->flags = I40E_FLAG_RX_CSUM_ENABLED | + I40E_FLAG_MSI_ENABLED | + I40E_FLAG_MSIX_ENABLED; + + if (iommu_present(&pci_bus_type)) + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + else + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; + + /* Set default ITR */ + pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; + pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; + + /* Depending on PF configurations, it is possible that the RSS + * maximum might end up larger than the available queues + */ + pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; + pf->rss_size = 1; + pf->rss_table_size = pf->hw.func_caps.rss_table_size; + pf->rss_size_max = min_t(int, pf->rss_size_max, + pf->hw.func_caps.num_tx_qp); + if (pf->hw.func_caps.rss) { + pf->flags |= I40E_FLAG_RSS_ENABLED; + pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); + } + + /* MFP mode enabled */ + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + pf->flags |= I40E_FLAG_MFP_ENABLED; + dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + if (i40e_get_npar_bw_setting(pf)) + dev_warn(&pf->pdev->dev, + "Could not get NPAR bw settings\n"); + else + dev_info(&pf->pdev->dev, + "Min BW = %8.8x, Max BW = %8.8x\n", + pf->npar_min_bw, pf->npar_max_bw); + } + + /* FW/NVM is not yet fixed in this regard */ + if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || + (pf->hw.func_caps.fd_filters_best_effort > 0)) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; + /* Setup a counter for fd_atr per PF */ + pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* Setup a counter for fd_sb per PF */ + pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); + } else { + dev_info(&pf->pdev->dev, + "Flow Director Sideband mode Disabled in MFP mode\n"); + } + pf->fdir_pf_filter_count = + pf->hw.func_caps.fd_filters_guaranteed; + pf->hw.fdir_shared_filter_count = + pf->hw.func_caps.fd_filters_best_effort; + } + + if (pf->hw.func_caps.vmdq) { + pf->flags |= I40E_FLAG_VMDQ_ENABLED; + pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; + pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; + } + +#ifdef I40E_FCOE + err = i40e_init_pf_fcoe(pf); + if (err) + dev_info(&pf->pdev->dev, "init_pf_fcoe failed: 
%d\n", err); + +#endif /* I40E_FCOE */ +#ifdef CONFIG_PCI_IOV + if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { + pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; + pf->flags |= I40E_FLAG_SRIOV_ENABLED; + pf->num_req_vfs = min_t(int, + pf->hw.func_caps.num_vfs, + I40E_MAX_VF_COUNT); + } +#endif /* CONFIG_PCI_IOV */ + pf->eeprom_version = 0xDEAD; + pf->lan_veb = I40E_NO_VEB; + pf->lan_vsi = I40E_NO_VSI; + + /* set up queue assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); + pf->qp_pile = kzalloc(size, GFP_KERNEL); + if (!pf->qp_pile) { + err = -ENOMEM; + goto sw_init_done; + } + pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; + pf->qp_pile->search_hint = 0; + + pf->tx_timeout_recovery_level = 1; + + mutex_init(&pf->switch_mutex); + + /* If NPAR is enabled nudge the Tx scheduler */ + if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) + i40e_set_npar_bw_setting(pf); + +sw_init_done: + return err; +} + +/** + * i40e_set_ntuple - set the ntuple feature flag and take action + * @pf: board private structure to initialize + * @features: the feature set that the stack is suggesting + * + * returns a bool to indicate if reset needs to happen + **/ +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) +{ + bool need_reset = false; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + if (features & NETIF_F_NTUPLE) { + /* Enable filters and mark for reset */ + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + need_reset = true; + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + } else { + /* turn off filters, mark for reset and clear SW filter list */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + need_reset = true; + i40e_fdir_filter_exit(pf); + } + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; + pf->fdir_pf_active_filters = 0; + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); + /* if ATR was auto disabled it can be re-enabled. 
*/ + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + } + return need_reset; +} + +/** + * i40e_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + **/ +static int i40e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + bool need_reset; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); + + need_reset = i40e_set_ntuple(pf, features); + + if (need_reset) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + return 0; +} + +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * @pf: board private structure + * @port: The UDP port to look up + * + * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found + **/ +static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +{ + u8 i; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->vxlan_ports[i] == port) + return i; + } + + return i; +} + +/** + * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: New UDP port number that VXLAN started listening to + **/ +static void i40e_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 next_idx; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "vxlan port %d already offloaded\n", + ntohs(port)); + return; + } + + /* Now check if there is space to add the new port */ + next_idx = i40e_get_vxlan_port_idx(pf, 0); + + if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", + ntohs(port)); + return; + } + + /* New port: add it and mark its index in the bitmap */ + pf->vxlan_ports[next_idx] = port; + pf->pending_vxlan_bitmap |= (1 << next_idx); + pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); +} + +/** + * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + **/ +static void i40e_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->vxlan_ports[idx] = 0; + pf->pending_vxlan_bitmap |= (1 << idx); + pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", + ntohs(port)); + } else { + 
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", + ntohs(port)); + } +} + +#endif +static int i40e_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + + if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) + return -EOPNOTSUPP; + + ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); + memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); + + return 0; +} + +/** + * i40e_ndo_fdb_add - add an entry to the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @flags: instructions from stack about fdb operation + */ +static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + int err = 0; + + if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + if (vid) { + pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); + return -EINVAL; + } + + /* Hardware does not support aging addresses so if a + * ndm_state is given only allow permanent addresses + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + netdev_info(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + else + err = -EINVAL; + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * + * Inserts a new hardware bridge if not already created and + * enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. 
+ **/ +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ + if (mode == BRIDGE_MODE_VEB) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * + * Return the mode in which the hardware bridge is operating in + * i.e VEB or VEPA. 
+ **/ +#ifdef HAVE_BRIDGE_FILTER +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask, int nlflags) +#else +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, int nlflags) +#endif /* HAVE_BRIDGE_FILTER */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + int i; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for the PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + if (!veb) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, + nlflags); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ + +static const struct net_device_ops i40e_netdev_ops = { + .ndo_open = i40e_open, + .ndo_stop = i40e_close, + .ndo_start_xmit = i40e_lan_xmit_frame, + .ndo_get_stats64 = i40e_get_netdev_stats_struct, + .ndo_set_rx_mode = i40e_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = i40e_set_mac, + .ndo_change_mtu = i40e_change_mtu, + .ndo_do_ioctl = i40e_ioctl, + .ndo_tx_timeout = i40e_tx_timeout, + .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40e_netpoll, +#endif + .ndo_setup_tc = i40e_setup_tc, +#ifdef I40E_FCOE + .ndo_fcoe_enable = i40e_fcoe_enable, + .ndo_fcoe_disable = i40e_fcoe_disable, +#endif + .ndo_set_features = i40e_set_features, + .ndo_set_vf_mac = i40e_ndo_set_vf_mac, + .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, + .ndo_set_vf_rate = i40e_ndo_set_vf_bw, + .ndo_get_vf_config = i40e_ndo_get_vf_config, + .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, + .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, +#ifdef CONFIG_I40E_VXLAN + .ndo_add_vxlan_port = i40e_add_vxlan_port, + .ndo_del_vxlan_port = i40e_del_vxlan_port, +#endif + .ndo_get_phys_port_id = i40e_get_phys_port_id, + .ndo_fdb_add = i40e_ndo_fdb_add, +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = i40e_ndo_bridge_getlink, + .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +}; + +/** + * i40e_config_netdev - Setup the netdev flags + * @vsi: the VSI being configured + * + * Returns 0 on success, negative value on failure + **/ +static int i40e_config_netdev(struct i40e_vsi *vsi) +{ + u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + int etherdev_size; + + etherdev_size = sizeof(struct i40e_netdev_priv); + netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); + if (!netdev) + return -ENOMEM; + + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO; + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_RXHASH | + 0; + + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + netdev->features |= NETIF_F_NTUPLE; + + /* copy netdev 
features into list of user selectable features */ + netdev->hw_features |= netdev->features; + + if (vsi->type == I40E_VSI_MAIN) { + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + ether_addr_copy(mac_addr, hw->mac.perm_addr); + /* The following steps are necessary to prevent reception + * of tagged packets - some older NVM configurations load a + * default a MAC-VLAN filter that accepts any tagged packet + * which must be replaced by a normal filter. + */ + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + i40e_add_filter(vsi, mac_addr, + I40E_VLAN_ANY, false, true); + } else { + /* relate the VSI_VMDQ name to the VSI_MAIN name */ + snprintf(netdev->name, IFNAMSIZ, "%sv%%d", + pf->vsi[pf->lan_vsi]->netdev->name); + random_ether_addr(mac_addr); + i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); + } + i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); + + ether_addr_copy(netdev->dev_addr, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + /* vlan gets same features (except vlan offload) + * after any tweaks for specific VSI types + */ + netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + /* Setup netdev TC information */ + i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); + + netdev->netdev_ops = &i40e_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + i40e_set_ethtool_ops(netdev); +#ifdef I40E_FCOE + i40e_fcoe_config_netdev(netdev, vsi); +#endif + + return 0; +} + +/** + * i40e_vsi_delete - Delete a VSI from the switch + * @vsi: the VSI being removed + * + * Returns 0 on success, negative value on failure + **/ +static void i40e_vsi_delete(struct i40e_vsi *vsi) +{ + /* remove default VSI is not allowed */ + if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) + return; + + i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); +} + +/** + * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB + * @vsi: the VSI being queried + * + * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode + **/ +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + struct i40e_pf *pf = vsi->back; + + /* Uplink is not a bridge so default to VEB */ + if (vsi->veb_idx == I40E_NO_VEB) + return 1; + + veb = pf->veb[vsi->veb_idx]; + /* Uplink is a bridge in VEPA mode */ + if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + return 0; + + /* Uplink is a bridge in VEB mode */ + return 1; +} + +/** + * i40e_add_vsi - Add a VSI to the switch + * @vsi: the VSI being configured + * + * This initializes a VSI context depending on the VSI type to be added and + * passes it down to the add_vsi aq command. + **/ +static int i40e_add_vsi(struct i40e_vsi *vsi) +{ + int ret = -ENODEV; + struct i40e_mac_filter *f, *ftmp; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_vsi_context ctxt; + u8 enabled_tc = 0x1; /* TC0 enabled */ + int f_count = 0; + + memset(&ctxt, 0, sizeof(ctxt)); + switch (vsi->type) { + case I40E_VSI_MAIN: + /* The PF's main VSI is already setup as part of the + * device initialization, so we'll not bother with + * the add_vsi call, but we will retrieve the current + * VSI context. 
+ */ + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + if (ret) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi config, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + return -ENOENT; + } + vsi->info = ctxt.info; + vsi->info.valid_sections = 0; + + vsi->seid = ctxt.seid; + vsi->id = ctxt.vsi_number; + + enabled_tc = i40e_pf_get_tc_map(pf); + + /* MFP mode setup queue map and update VSI */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && + !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "update vsi failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + ret = -ENOENT; + goto err; + } + /* update the local VSI info queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + } else { + /* Default/Main VSI is only enabled for TC0 + * reconfigure it to enable all TCs that are + * available on the port in SFP mode. + * For MFP case the iSCSI PF would use this + * flow to enable LAN+iSCSI TC. + */ + ret = i40e_vsi_config_tc(vsi, enabled_tc); + if (ret) { + dev_info(&pf->pdev->dev, + "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", + enabled_tc, ret, + pf->hw.aq.asq_last_status); + ret = -ENOENT; + } + } + break; + + case I40E_VSI_FDIR: + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + (i40e_is_vsi_uplink_mode_veb(vsi))) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); + break; + + case I40E_VSI_VMDQ2: + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; + + /* This VSI is connected to VEB so the switch_id + * should be set to zero by default. + */ + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Setup the VSI tx/rx queue map for TC0 only for now */ + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); + break; + + case I40E_VSI_SRIOV: + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + + /* This VSI is connected to VEB so the switch_id + * should be set to zero by default. 
+ */ + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; + if (pf->vf[vsi->vf_id].spoofchk) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + ctxt.info.sec_flags |= + (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); + } + /* Setup the VSI tx/rx queue map for TC0 only for now */ + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); + break; + +#ifdef I40E_FCOE + case I40E_VSI_FCOE: + ret = i40e_fcoe_vsi_init(vsi, &ctxt); + if (ret) { + dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); + return ret; + } + break; + +#endif /* I40E_FCOE */ + default: + return -ENODEV; + } + + if (vsi->type != I40E_VSI_MAIN) { + ret = i40e_aq_add_vsi(hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add vsi failed, aq_err=%d\n", + vsi->back->hw.aq.asq_last_status); + ret = -ENOENT; + goto err; + } + vsi->info = ctxt.info; + vsi->info.valid_sections = 0; + vsi->seid = ctxt.seid; + vsi->id = ctxt.vsi_number; + } + + /* If macvlan filters already exist, force them to get loaded */ + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + f->changed = true; + f_count++; + + if (f->is_laa && vsi->type == I40E_VSI_MAIN) { + struct i40e_aqc_remove_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, f->macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + } + + i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + f->macaddr, NULL); + } + } + if (f_count) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + pf->flags |= I40E_FLAG_FILTER_SYNC; + } + + /* Update VSI BW information */ + ret = i40e_vsi_get_bw_info(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "couldn't get vsi bw info, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + /* VSI is already added so not tearing that up */ + ret = 0; + } + +err: + return ret; +} + +/** + * i40e_vsi_release - Delete a VSI and free its resources + * @vsi: the VSI being removed + * + * Returns 0 on success or < 0 on error + **/ +int i40e_vsi_release(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + struct i40e_veb *veb = NULL; + struct i40e_pf *pf; + u16 uplink_seid; + int i, n; + + pf = vsi->back; + + /* release of a VEB-owner or last VSI is not allowed */ + if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { + dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", + vsi->seid, vsi->uplink_seid); + return -ENODEV; + } + if (vsi == pf->vsi[pf->lan_vsi] && + !test_bit(__I40E_DOWN, &pf->state)) { + dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); + return -ENODEV; + } + + uplink_seid = vsi->uplink_seid; + if (vsi->type != I40E_VSI_SRIOV) { + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + if (vsi->netdev) { + /* results in a call to i40e_close() */ + unregister_netdev(vsi->netdev); + } + } else { + i40e_vsi_close(vsi); + } + i40e_vsi_disable_irq(vsi); + } + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) + i40e_del_filter(vsi, f->macaddr, 
f->vlan, + f->is_vf, f->is_netdev); + i40e_sync_vsi_filters(vsi); + + i40e_vsi_delete(vsi); + i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + i40e_vsi_clear_rings(vsi); + i40e_vsi_clear(vsi); + + /* If this was the last thing on the VEB, except for the + * controlling VSI, remove the VEB, which puts the controlling + * VSI onto the next level down in the switch. + * + * Well, okay, there's one more exception here: don't remove + * the orphan VEBs yet. We'll wait for an explicit remove request + * from up the network stack. + */ + for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && + pf->vsi[i]->uplink_seid == uplink_seid && + (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { + n++; /* count the VSIs */ + } + } + for (i = 0; i < I40E_MAX_VEB; i++) { + if (!pf->veb[i]) + continue; + if (pf->veb[i]->uplink_seid == uplink_seid) + n++; /* count the VEBs */ + if (pf->veb[i]->seid == uplink_seid) + veb = pf->veb[i]; + } + if (n == 0 && veb && veb->uplink_seid != 0) + i40e_veb_release(veb); + + return 0; +} + +/** + * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI + * @vsi: ptr to the VSI + * + * This should only be called after i40e_vsi_mem_alloc() which allocates the + * corresponding SW VSI structure and initializes num_queue_pairs for the + * newly allocated VSI. + * + * Returns 0 on success or negative on failure + **/ +static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) +{ + int ret = -ENOENT; + struct i40e_pf *pf = vsi->back; + + if (vsi->q_vectors[0]) { + dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", + vsi->seid); + return -EEXIST; + } + + if (vsi->base_vector) { + dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", + vsi->seid, vsi->base_vector); + return -EEXIST; + } + + ret = i40e_vsi_alloc_q_vectors(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "failed to allocate %d q_vector for VSI %d, ret=%d\n", + vsi->num_q_vectors, vsi->seid, ret); + vsi->num_q_vectors = 0; + goto vector_setup_out; + } + + if (vsi->num_q_vectors) + vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, + vsi->num_q_vectors, vsi->idx); + if (vsi->base_vector < 0) { + dev_info(&pf->pdev->dev, + "failed to get tracking for %d vectors for VSI %d, err=%d\n", + vsi->num_q_vectors, vsi->seid, vsi->base_vector); + i40e_vsi_free_q_vectors(vsi); + ret = -ENOENT; + goto vector_setup_out; + } + +vector_setup_out: + return ret; +} + +/** + * i40e_vsi_reinit_setup - return and reallocate resources for a VSI + * @vsi: pointer to the vsi. + * + * This re-allocates a vsi's queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct + * on success, otherwise returns NULL on failure. + **/ +static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + u8 enabled_tc; + int ret; + + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); + i40e_vsi_clear_rings(vsi); + + i40e_vsi_free_arrays(vsi, false); + i40e_set_num_rings_in_vsi(vsi); + ret = i40e_vsi_alloc_arrays(vsi, false); + if (ret) + goto err_vsi; + + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); + if (ret < 0) { + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); + goto err_vsi; + } + vsi->base_queue = ret; + + /* Update the FW view of the VSI. Force a reset of TC and queue + * layout configurations. 
+ */ + enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; + pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; + i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + + /* assign it some queues */ + ret = i40e_alloc_rings(vsi); + if (ret) + goto err_rings; + + /* map all of the rings to the q_vectors */ + i40e_vsi_map_rings_to_vectors(vsi); + return vsi; + +err_rings: + i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); +err_vsi: + i40e_vsi_clear(vsi); + return NULL; +} + +/** + * i40e_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @type: VSI type + * @uplink_seid: the switch element to link to + * @param1: usage depends upon VSI type. For VF types, indicates VF id + * + * This allocates the sw VSI structure and its queue resources, then add a VSI + * to the identified VEB. + * + * Returns pointer to the successfully allocated and configure VSI sw struct on + * success, otherwise returns NULL on failure. + **/ +struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, + u16 uplink_seid, u32 param1) +{ + struct i40e_vsi *vsi = NULL; + struct i40e_veb *veb = NULL; + int ret, i; + int v_idx; + + /* The requested uplink_seid must be either + * - the PF's port seid + * no VEB is needed because this is the PF + * or this is a Flow Director special case VSI + * - seid of an existing VEB + * - seid of a VSI that owns an existing VEB + * - seid of a VSI that doesn't own a VEB + * a new VEB is created and the VSI becomes the owner + * - seid of the PF VSI, which is what creates the first VEB + * this is a special case of the previous + * + * Find which uplink_seid we were given and create a new VEB if needed + */ + for (i = 0; i < I40E_MAX_VEB; i++) { + if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { + veb = pf->veb[i]; + break; + } + } + + if (!veb && uplink_seid != pf->mac_seid) { + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { + vsi = pf->vsi[i]; + break; + } + } + if (!vsi) { + dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", + uplink_seid); + return NULL; + } + + if (vsi->uplink_seid == pf->mac_seid) + veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, + vsi->tc_config.enabled_tc); + else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + dev_info(&vsi->back->pdev->dev, + "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", + __func__); + return NULL; + } + /* We come up by default in VEPA mode if SRIOV is not + * already enabled, in which case we can't force VEPA + * mode. 
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + veb->bridge_mode = BRIDGE_MODE_VEPA; + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + } + i40e_config_bridge_mode(veb); + } + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + if (!veb) { + dev_info(&pf->pdev->dev, "couldn't add VEB\n"); + return NULL; + } + + vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + uplink_seid = veb->seid; + } + + /* get vsi sw struct */ + v_idx = i40e_vsi_mem_alloc(pf, type); + if (v_idx < 0) + goto err_alloc; + vsi = pf->vsi[v_idx]; + if (!vsi) + goto err_alloc; + vsi->type = type; + vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); + + if (type == I40E_VSI_MAIN) + pf->lan_vsi = v_idx; + else if (type == I40E_VSI_SRIOV) + vsi->vf_id = param1; + /* assign it some queues */ + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, + vsi->idx); + if (ret < 0) { + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); + goto err_vsi; + } + vsi->base_queue = ret; + + /* get a VSI from the hardware */ + vsi->uplink_seid = uplink_seid; + ret = i40e_add_vsi(vsi); + if (ret) + goto err_vsi; + + switch (vsi->type) { + /* setup the netdev if needed */ + case I40E_VSI_MAIN: + case I40E_VSI_VMDQ2: + case I40E_VSI_FCOE: + ret = i40e_config_netdev(vsi); + if (ret) + goto err_netdev; + ret = register_netdev(vsi->netdev); + if (ret) + goto err_netdev; + vsi->netdev_registered = true; + netif_carrier_off(vsi->netdev); +#ifdef CONFIG_I40E_DCB + /* Setup DCB netlink interface */ + i40e_dcbnl_setup(vsi); +#endif /* CONFIG_I40E_DCB */ + /* fall through */ + + case I40E_VSI_FDIR: + /* set up vectors and rings if needed */ + ret = i40e_vsi_setup_vectors(vsi); + if (ret) + goto err_msix; + + ret = i40e_alloc_rings(vsi); + if (ret) + goto err_rings; + + /* map all of the rings to the q_vectors */ + i40e_vsi_map_rings_to_vectors(vsi); + + i40e_vsi_reset_stats(vsi); + break; + + default: + /* no netdev or rings for the other VSI types */ + break; + } + + return vsi; + +err_rings: + i40e_vsi_free_q_vectors(vsi); +err_msix: + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } +err_netdev: + i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); +err_vsi: + i40e_vsi_clear(vsi); +err_alloc: + return NULL; +} + +/** + * i40e_veb_get_bw_info - Query VEB BW information + * @veb: the veb to query + * + * Query the Tx scheduler BW configuration data for given VEB + **/ +static int i40e_veb_get_bw_info(struct i40e_veb *veb) +{ + struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; + struct i40e_pf *pf = veb->pf; + struct i40e_hw *hw = &pf->hw; + u32 tc_bw_max; + int ret = 0; + int i; + + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "query veb bw config failed, aq_err=%d\n", + hw->aq.asq_last_status); + goto out; + } + + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "query veb bw ets config failed, aq_err=%d\n", + hw->aq.asq_last_status); + goto out; + } + + veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); + veb->bw_max_quanta = ets_data.tc_bw_max; + veb->is_abs_credits = bw_data.absolute_credits_enable; + veb->enabled_tc = ets_data.tc_valid_bits; + tc_bw_max = 
le16_to_cpu(bw_data.tc_bw_max[0]) | + (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; + veb->bw_tc_limit_credits[i] = + le16_to_cpu(bw_data.tc_bw_limits[i]); + veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); + } + +out: + return ret; +} + +/** + * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF + * @pf: board private structure + * + * On error: returns error code (negative) + * On success: returns vsi index in PF (positive) + **/ +static int i40e_veb_mem_alloc(struct i40e_pf *pf) +{ + int ret = -ENOENT; + struct i40e_veb *veb; + int i; + + /* Need to protect the allocation of switch elements at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* VEB list may be fragmented if VEB creation/destruction has + * been happening. We can afford to do a quick scan to look + * for any free slots in the list. + * + * find next empty veb slot, looping back around if necessary + */ + i = 0; + while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) + i++; + if (i >= I40E_MAX_VEB) { + ret = -ENOMEM; + goto err_alloc_veb; /* out of VEB slots! */ + } + + veb = kzalloc(sizeof(*veb), GFP_KERNEL); + if (!veb) { + ret = -ENOMEM; + goto err_alloc_veb; + } + veb->pf = pf; + veb->idx = i; + veb->enabled_tc = 1; + + pf->veb[i] = veb; + ret = i; +err_alloc_veb: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +/** + * i40e_switch_branch_release - Delete a branch of the switch tree + * @branch: where to start deleting + * + * This uses recursion to find the tips of the branch to be + * removed, deleting until we get back to and can delete this VEB. + **/ +static void i40e_switch_branch_release(struct i40e_veb *branch) +{ + struct i40e_pf *pf = branch->pf; + u16 branch_seid = branch->seid; + u16 veb_idx = branch->idx; + int i; + + /* release any VEBs on this VEB - RECURSION */ + for (i = 0; i < I40E_MAX_VEB; i++) { + if (!pf->veb[i]) + continue; + if (pf->veb[i]->uplink_seid == branch->seid) + i40e_switch_branch_release(pf->veb[i]); + } + + /* Release the VSIs on this VEB, but not the owner VSI. + * + * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing + * the VEB itself, so don't use (*branch) after this loop. + */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + if (pf->vsi[i]->uplink_seid == branch_seid && + (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { + i40e_vsi_release(pf->vsi[i]); + } + } + + /* There's one corner case where the VEB might not have been + * removed, so double check it here and remove it if needed. + * This case happens if the veb was created from the debugfs + * commands and no VSIs were added to it. 
+ */ + if (pf->veb[veb_idx]) + i40e_veb_release(pf->veb[veb_idx]); +} + +/** + * i40e_veb_clear - remove veb struct + * @veb: the veb to remove + **/ +static void i40e_veb_clear(struct i40e_veb *veb) +{ + if (!veb) + return; + + if (veb->pf) { + struct i40e_pf *pf = veb->pf; + + mutex_lock(&pf->switch_mutex); + if (pf->veb[veb->idx] == veb) + pf->veb[veb->idx] = NULL; + mutex_unlock(&pf->switch_mutex); + } + + kfree(veb); +} + +/** + * i40e_veb_release - Delete a VEB and free its resources + * @veb: the VEB being removed + **/ +void i40e_veb_release(struct i40e_veb *veb) +{ + struct i40e_vsi *vsi = NULL; + struct i40e_pf *pf; + int i, n = 0; + + pf = veb->pf; + + /* find the remaining VSI and check for extras */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { + n++; + vsi = pf->vsi[i]; + } + } + if (n != 1) { + dev_info(&pf->pdev->dev, + "can't remove VEB %d with %d VSIs left\n", + veb->seid, n); + return; + } + + /* move the remaining VSI to uplink veb */ + vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; + if (veb->uplink_seid) { + vsi->uplink_seid = veb->uplink_seid; + if (veb->uplink_seid == pf->mac_seid) + vsi->veb_idx = I40E_NO_VEB; + else + vsi->veb_idx = veb->veb_idx; + } else { + /* floating VEB */ + vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; + vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; + } + + i40e_aq_delete_element(&pf->hw, veb->seid, NULL); + i40e_veb_clear(veb); +} + +/** + * i40e_add_veb - create the VEB in the switch + * @veb: the VEB to be instantiated + * @vsi: the controlling VSI + **/ +static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) +{ + bool is_default = false; + bool is_cloud = false; + int ret; + + /* get a VEB from the hardware */ + ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, + veb->enabled_tc, is_default, + is_cloud, &veb->seid, NULL); + if (ret) { + dev_info(&veb->pf->pdev->dev, + "couldn't add VEB, err %d, aq_err %d\n", + ret, veb->pf->hw.aq.asq_last_status); + return -EPERM; + } + + /* get statistics counter */ + ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, + &veb->stats_idx, NULL, NULL, NULL); + if (ret) { + dev_info(&veb->pf->pdev->dev, + "couldn't get VEB statistics idx, err %d, aq_err %d\n", + ret, veb->pf->hw.aq.asq_last_status); + return -EPERM; + } + ret = i40e_veb_get_bw_info(veb); + if (ret) { + dev_info(&veb->pf->pdev->dev, + "couldn't get VEB bw info, err %d, aq_err %d\n", + ret, veb->pf->hw.aq.asq_last_status); + i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); + return -ENOENT; + } + + vsi->uplink_seid = veb->seid; + vsi->veb_idx = veb->idx; + vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + + return 0; +} + +/** + * i40e_veb_setup - Set up a VEB + * @pf: board private structure + * @flags: VEB setup flags + * @uplink_seid: the switch element to link to + * @vsi_seid: the initial VSI seid + * @enabled_tc: Enabled TC bit-map + * + * This allocates the sw VEB structure and links it into the switch + * It is possible and legal for this to be a duplicate of an already + * existing VEB. It is also possible for both uplink and vsi seids + * to be zero, in order to create a floating VEB. + * + * Returns pointer to the successfully allocated VEB sw struct on + * success, otherwise returns NULL on failure. 
+ **/ +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, + u16 uplink_seid, u16 vsi_seid, + u8 enabled_tc) +{ + struct i40e_veb *veb, *uplink_veb = NULL; + int vsi_idx, veb_idx; + int ret; + + /* if one seid is 0, the other must be 0 to create a floating relay */ + if ((uplink_seid == 0 || vsi_seid == 0) && + (uplink_seid + vsi_seid != 0)) { + dev_info(&pf->pdev->dev, + "one, not both seid's are 0: uplink=%d vsi=%d\n", + uplink_seid, vsi_seid); + return NULL; + } + + /* make sure there is such a vsi and uplink */ + for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) + if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) + break; + if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { + dev_info(&pf->pdev->dev, "vsi seid %d not found\n", + vsi_seid); + return NULL; + } + + if (uplink_seid && uplink_seid != pf->mac_seid) { + for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { + if (pf->veb[veb_idx] && + pf->veb[veb_idx]->seid == uplink_seid) { + uplink_veb = pf->veb[veb_idx]; + break; + } + } + if (!uplink_veb) { + dev_info(&pf->pdev->dev, + "uplink seid %d not found\n", uplink_seid); + return NULL; + } + } + + /* get veb sw struct */ + veb_idx = i40e_veb_mem_alloc(pf); + if (veb_idx < 0) + goto err_alloc; + veb = pf->veb[veb_idx]; + veb->flags = flags; + veb->uplink_seid = uplink_seid; + veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); + veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); + + /* create the VEB in the switch */ + ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); + if (ret) + goto err_veb; + if (vsi_idx == pf->lan_vsi) + pf->lan_veb = veb->idx; + + return veb; + +err_veb: + i40e_veb_clear(veb); +err_alloc: + return NULL; +} + +/** + * i40e_setup_pf_switch_element - set PF vars based on switch type + * @pf: board private structure + * @ele: element we are building info from + * @num_reported: total number of elements + * @printconfig: should we print the contents + * + * helper function to assist in extracting a few useful SEID values. + **/ +static void i40e_setup_pf_switch_element(struct i40e_pf *pf, + struct i40e_aqc_switch_config_element_resp *ele, + u16 num_reported, bool printconfig) +{ + u16 downlink_seid = le16_to_cpu(ele->downlink_seid); + u16 uplink_seid = le16_to_cpu(ele->uplink_seid); + u8 element_type = ele->element_type; + u16 seid = le16_to_cpu(ele->seid); + + if (printconfig) + dev_info(&pf->pdev->dev, + "type=%d seid=%d uplink=%d downlink=%d\n", + element_type, seid, uplink_seid, downlink_seid); + + switch (element_type) { + case I40E_SWITCH_ELEMENT_TYPE_MAC: + pf->mac_seid = seid; + break; + case I40E_SWITCH_ELEMENT_TYPE_VEB: + /* Main VEB? 
*/ + if (uplink_seid != pf->mac_seid) + break; + if (pf->lan_veb == I40E_NO_VEB) { + int v; + + /* find existing or else empty VEB */ + for (v = 0; v < I40E_MAX_VEB; v++) { + if (pf->veb[v] && (pf->veb[v]->seid == seid)) { + pf->lan_veb = v; + break; + } + } + if (pf->lan_veb == I40E_NO_VEB) { + v = i40e_veb_mem_alloc(pf); + if (v < 0) + break; + pf->lan_veb = v; + } + } + + pf->veb[pf->lan_veb]->seid = seid; + pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; + pf->veb[pf->lan_veb]->pf = pf; + pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; + break; + case I40E_SWITCH_ELEMENT_TYPE_VSI: + if (num_reported != 1) + break; + /* This is immediately after a reset so we can assume this is + * the PF's VSI + */ + pf->mac_seid = uplink_seid; + pf->pf_seid = downlink_seid; + pf->main_vsi_seid = seid; + if (printconfig) + dev_info(&pf->pdev->dev, + "pf_seid=%d main_vsi_seid=%d\n", + pf->pf_seid, pf->main_vsi_seid); + break; + case I40E_SWITCH_ELEMENT_TYPE_PF: + case I40E_SWITCH_ELEMENT_TYPE_VF: + case I40E_SWITCH_ELEMENT_TYPE_EMP: + case I40E_SWITCH_ELEMENT_TYPE_BMC: + case I40E_SWITCH_ELEMENT_TYPE_PE: + case I40E_SWITCH_ELEMENT_TYPE_PA: + /* ignore these for now */ + break; + default: + dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", + element_type, seid); + break; + } +} + +/** + * i40e_fetch_switch_configuration - Get switch config from firmware + * @pf: board private structure + * @printconfig: should we print the contents + * + * Get the current switch configuration from the device and + * extract a few useful SEID values. + **/ +int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) +{ + struct i40e_aqc_get_switch_config_resp *sw_config; + u16 next_seid = 0; + int ret = 0; + u8 *aq_buf; + int i; + + aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); + if (!aq_buf) + return -ENOMEM; + + sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; + do { + u16 num_reported, num_total; + + ret = i40e_aq_get_switch_config(&pf->hw, sw_config, + I40E_AQ_LARGE_BUF, + &next_seid, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "get switch config failed %d aq_err=%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(aq_buf); + return -ENOENT; + } + + num_reported = le16_to_cpu(sw_config->header.num_reported); + num_total = le16_to_cpu(sw_config->header.num_total); + + if (printconfig) + dev_info(&pf->pdev->dev, + "header: %d reported %d total\n", + num_reported, num_total); + + for (i = 0; i < num_reported; i++) { + struct i40e_aqc_switch_config_element_resp *ele = + &sw_config->element[i]; + + i40e_setup_pf_switch_element(pf, ele, num_reported, + printconfig); + } + } while (next_seid != 0); + + kfree(aq_buf); + return ret; +} + +/** + * i40e_setup_pf_switch - Setup the HW switch on startup or after reset + * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. 
+ * + * Returns 0 on success, negative value on failure + **/ +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) +{ + int ret; + + /* find out what's out there already */ + ret = i40e_fetch_switch_configuration(pf, false); + if (ret) { + dev_info(&pf->pdev->dev, + "couldn't fetch switch config, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + return ret; + } + i40e_pf_reset_stats(pf); + + /* first time setup */ + if (pf->lan_vsi == I40E_NO_VSI || reinit) { + struct i40e_vsi *vsi = NULL; + u16 uplink_seid; + + /* Set up the PF VSI associated with the PF's main VSI + * that is already in the HW switch + */ + if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) + uplink_seid = pf->veb[pf->lan_veb]->seid; + else + uplink_seid = pf->mac_seid; + if (pf->lan_vsi == I40E_NO_VSI) + vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + else if (reinit) + vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); + if (!vsi) { + dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); + i40e_fdir_teardown(pf); + return -EAGAIN; + } + } else { + /* force a reset of TC and queue layout configurations */ + u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; + pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; + i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + } + i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); + + i40e_fdir_sb_setup(pf); + + /* Setup static PF queue filter control settings */ + ret = i40e_setup_pf_filter_control(pf); + if (ret) { + dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", + ret); + /* Failure here should not stop continuing other steps */ + } + + /* enable RSS in the HW, even for only one queue, as the stack can use + * the hash + */ + if ((pf->flags & I40E_FLAG_RSS_ENABLED)) + i40e_config_rss(pf); + + /* fill in link information and enable LSE reporting */ + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + i40e_link_event(pf); + + /* Initialize user-specific link properties */ + pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & + I40E_AQ_AN_COMPLETED) ? true : false); + + i40e_ptp_init(pf); + + return ret; +} + +/** + * i40e_determine_queue_usage - Work out queue distribution + * @pf: board private structure + **/ +static void i40e_determine_queue_usage(struct i40e_pf *pf) +{ + int queues_left; + + pf->num_lan_qps = 0; +#ifdef I40E_FCOE + pf->num_fcoe_qps = 0; +#endif + + /* Find the max queues to be put into basic use. We'll always be + * using TC0, whether or not DCB is running, and TC0 will get the + * big RSS set. 
+ */ + queues_left = pf->hw.func_caps.num_tx_qp; + + if ((queues_left == 1) || + !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + /* one qp for PF, no queues for anything else */ + queues_left = 0; + pf->rss_size = pf->num_lan_qps = 1; + + /* make sure all the fancies are disabled */ + pf->flags &= ~(I40E_FLAG_RSS_ENABLED | +#ifdef I40E_FCOE + I40E_FLAG_FCOE_ENABLED | +#endif + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_SRIOV_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_CAPABLE))) { + /* one qp for PF */ + pf->rss_size = pf->num_lan_qps = 1; + queues_left -= pf->num_lan_qps; + + pf->flags &= ~(I40E_FLAG_RSS_ENABLED | +#ifdef I40E_FCOE + I40E_FLAG_FCOE_ENABLED | +#endif + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + } else { + /* Not enough queues for all TCs */ + if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && + (queues_left < I40E_MAX_TRAFFIC_CLASS)) { + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); + } + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + pf->num_lan_qps = min_t(int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + + queues_left -= pf->num_lan_qps; + } + +#ifdef I40E_FCOE + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + if (I40E_DEFAULT_FCOE <= queues_left) { + pf->num_fcoe_qps = I40E_DEFAULT_FCOE; + } else if (I40E_MINIMUM_FCOE <= queues_left) { + pf->num_fcoe_qps = I40E_MINIMUM_FCOE; + } else { + pf->num_fcoe_qps = 0; + pf->flags &= ~I40E_FLAG_FCOE_ENABLED; + dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); + } + + queues_left -= pf->num_fcoe_qps; + } + +#endif + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (queues_left > 1) { + queues_left -= 1; /* save 1 queue for FD */ + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); + } + } + + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + pf->num_vf_qps && pf->num_req_vfs && queues_left) { + pf->num_req_vfs = min_t(int, pf->num_req_vfs, + (queues_left / pf->num_vf_qps)); + queues_left -= (pf->num_req_vfs * pf->num_vf_qps); + } + + if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { + pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, + (queues_left / pf->num_vmdq_qps)); + queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); + } + + pf->queues_left = queues_left; +#ifdef I40E_FCOE + dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); +#endif +} + +/** + * i40e_setup_pf_filter_control - Setup PF static filter control + * @pf: PF to be setup + * + * i40e_setup_pf_filter_control sets up a PF's initial filter control + * settings. If PE/FCoE are enabled then it will also set the per PF + * based filter sizes required for them. It also enables Flow director, + * ethertype and macvlan type filter settings for the pf. 
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_setup_pf_filter_control(struct i40e_pf *pf) +{ + struct i40e_filter_control_settings *settings = &pf->filter_settings; + + settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; + + /* Flow Director is enabled */ + if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) + settings->enable_fdir = true; + + /* Ethtype and MACVLAN filters enabled for PF */ + settings->enable_ethtype = true; + settings->enable_macvlan = true; + + if (i40e_set_filter_control(&pf->hw, settings)) + return -ENOENT; + + return 0; +} + +#define INFO_STRING_LEN 255 +static void i40e_print_features(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + char *buf, *string; + + string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!string) { + dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + return; + } + + buf = string; + + buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); +#ifdef CONFIG_PCI_IOV + buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); +#endif + buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); + + if (pf->flags & I40E_FLAG_RSS_ENABLED) + buf += sprintf(buf, "RSS "); + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + buf += sprintf(buf, "FD_ATR "); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + buf += sprintf(buf, "FD_SB "); + buf += sprintf(buf, "NTUPLE "); + } + if (pf->flags & I40E_FLAG_DCB_CAPABLE) + buf += sprintf(buf, "DCB "); + if (pf->flags & I40E_FLAG_PTP) + buf += sprintf(buf, "PTP "); +#ifdef I40E_FCOE + if (pf->flags & I40E_FLAG_FCOE_ENABLED) + buf += sprintf(buf, "FCOE "); +#endif + + BUG_ON(buf > (string + INFO_STRING_LEN)); + dev_info(&pf->pdev->dev, "%s\n", string); + kfree(string); +} + +/** + * i40e_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in i40e_pci_tbl + * + * i40e_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + unsigned long ioremap_len; + struct i40e_pf *pf; + struct i40e_hw *hw; + static u16 pfs_found; + u16 link_status; + int err = 0; + u32 len; + u32 i; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + /* set up pci connections */ + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), i40e_driver_name); + if (err) { + dev_info(&pdev->dev, + "pci_request_selected_regions failed %d\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + /* Now that we have a PCI connection, we need to do the + * low level device setup. This is primarily setting up + * the Admin Queue structures and then querying for the + * device's current profile information. 
+ */ + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_pf_alloc; + } + pf->next_vsi = 0; + pf->pdev = pdev; + set_bit(__I40E_DOWN, &pf->state); + + hw = &pf->hw; + hw->back = pf; + + ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); + if (!hw->hw_addr) { + err = -EIO; + dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 0), + (unsigned int)pci_resource_len(pdev, 0), err); + goto err_ioremap; + } + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + pf->instance = pfs_found; + + if (debug != -1) { + pf->msg_enable = pf->hw.debug_mask; + pf->msg_enable = debug; + } + + /* do a special CORER for clearing PXE mode once at init */ + if (hw->revision_id == 0 && + (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { + wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); + i40e_flush(hw); + msleep(200); + pf->corer_count++; + + i40e_clear_pxe_mode(hw); + } + + /* Reset here to make sure all is clean and to define PF 'n' */ + i40e_clear_hw(hw); + err = i40e_pf_reset(hw); + if (err) { + dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + goto err_pf_reset; + } + pf->pfr_count++; + + hw->aq.num_arq_entries = I40E_AQ_LEN; + hw->aq.num_asq_entries = I40E_AQ_LEN; + hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; + pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; + + snprintf(pf->int_name, sizeof(pf->int_name) - 1, + "%s-%s:misc", + dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); + + err = i40e_init_shared_code(hw); + if (err) { + dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); + goto err_pf_reset; + } + + /* set up a default setting for link flow control */ + pf->hw.fc.requested_mode = I40E_FC_NONE; + + err = i40e_init_adminq(hw); + dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); + if (err) { + dev_info(&pdev->dev, + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + goto err_pf_reset; + } + + if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) + dev_info(&pdev->dev, + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || + hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) + dev_info(&pdev->dev, + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + + i40e_verify_eeprom(pf); + + /* Rev 0 hardware was never productized */ + if (hw->revision_id < 1) + dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. 
If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); + + i40e_clear_pxe_mode(hw); + err = i40e_get_capabilities(pf); + if (err) + goto err_adminq_setup; + + err = i40e_sw_init(pf); + if (err) { + dev_info(&pdev->dev, "sw_init failed: %d\n", err); + goto err_sw_init; + } + + err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, + pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + if (err) { + dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); + goto err_init_lan_hmc; + } + + err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (err) { + dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); + err = -ENOENT; + goto err_configure_lan_hmc; + } + + /* Disable LLDP for NICs that have firmware versions lower than v4.3. + * Ignore error return codes because if it was already disabled via + * hardware settings this will fail + */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || + (pf->hw.aq.fw_maj_ver < 4)) { + dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); + i40e_aq_stop_lldp(hw, true, NULL); + } + + i40e_get_mac_addr(hw, hw->mac.addr); + if (!is_valid_ether_addr(hw->mac.addr)) { + dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); + err = -EIO; + goto err_mac_addr; + } + dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); + ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); + i40e_get_port_mac_addr(hw, hw->mac.port_addr); + if (is_valid_ether_addr(hw->mac.port_addr)) + pf->flags |= I40E_FLAG_PORT_ID_VALID; +#ifdef I40E_FCOE + err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); + if (err) + dev_info(&pdev->dev, + "(non-fatal) SAN MAC retrieval failed: %d\n", err); + if (!is_valid_ether_addr(hw->mac.san_addr)) { + dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", + hw->mac.san_addr); + ether_addr_copy(hw->mac.san_addr, hw->mac.addr); + } + dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); +#endif /* I40E_FCOE */ + + pci_set_drvdata(pdev, pf); + pci_save_state(pdev); +#ifdef CONFIG_I40E_DCB + err = i40e_init_pf_dcb(pf); + if (err) { + dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + /* Continue without DCB enabled */ + } +#endif /* CONFIG_I40E_DCB */ + + /* set up periodic task facility */ + setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); + pf->service_timer_period = HZ; + + INIT_WORK(&pf->service_task, i40e_service_task); + clear_bit(__I40E_SERVICE_SCHED, &pf->state); + pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; + pf->link_check_timeout = jiffies; + + /* WoL defaults to disabled */ + pf->wol_en = false; + device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); + + /* set up the main switch operations */ + i40e_determine_queue_usage(pf); + err = i40e_init_interrupt_scheme(pf); + if (err) + goto err_switch_setup; + + /* The number of VSIs reported by the FW is the minimum guaranteed + * to us; HW supports far more and we share the remaining pool with + * the other PFs. We allocate space for more than the guarantee with + * the understanding that we might not get them all later. + */ + if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) + pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; + else + pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; + + /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
*/ + len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; + pf->vsi = kzalloc(len, GFP_KERNEL); + if (!pf->vsi) { + err = -ENOMEM; + goto err_switch_setup; + } + +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (pci_num_vf(pdev)) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + } +#endif + err = i40e_setup_pf_switch(pf, false); + if (err) { + dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); + goto err_vsis; + } + /* if FDIR VSI was set up, start it now */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + i40e_vsi_open(pf->vsi[i]); + break; + } + } + + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + err = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (err) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* The main driver is (mostly) up and happy. We need to set this state + * before setting up the misc vector or we get a race and the vector + * ends up disabled forever. + */ + clear_bit(__I40E_DOWN, &pf->state); + + /* In case of MSIX we are going to setup the misc vector right here + * to handle admin queue events etc. In case of legacy and MSI + * the misc functionality and queue processing is combined in + * the same vector and that gets setup at open. + */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + err = i40e_setup_misc_vector(pf); + if (err) { + dev_info(&pdev->dev, + "setup of misc vector failed: %d\n", err); + goto err_vsis; + } + } + +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + u32 val; + + /* disable link interrupts for VFs */ + val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); + val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; + wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); + i40e_flush(hw); + + if (pci_num_vf(pdev)) { + dev_info(&pdev->dev, + "Active VFs found, allocating resources.\n"); + err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); + if (err) + dev_info(&pdev->dev, + "Error %d allocating resources for existing VFs\n", + err); + } + } +#endif /* CONFIG_PCI_IOV */ + + pfs_found++; + + i40e_dbg_pf_init(pf); + + /* tell the firmware that we're starting */ + i40e_send_version(pf); + + /* since everything's happy, start the service_task timer */ + mod_timer(&pf->service_timer, + round_jiffies(jiffies + pf->service_timer_period)); + +#ifdef I40E_FCOE + /* create FCoE interface */ + i40e_fcoe_vsi_setup(pf); + +#endif + /* Get the negotiated link width and speed from PCI config space */ + pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); + + i40e_set_pci_config_data(hw, link_status); + + dev_info(&pdev->dev, "PCI-Express: %s %s\n", + (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : + hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : + hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : + "Unknown"), + (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : + hw->bus.width == i40e_bus_width_pcie_x4 ? 
"Width x4" : + hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : + hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" : + "Unknown")); + + if (hw->bus.width < i40e_bus_width_pcie_x8 || + hw->bus.speed < i40e_bus_speed_8000) { + dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); + dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + } + + /* get the requested speeds from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); + if (err) + dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", + err); + pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + + /* print a string summarizing features */ + i40e_print_features(pf); + + return 0; + + /* Unwind what we've done if something failed in the setup */ +err_vsis: + set_bit(__I40E_DOWN, &pf->state); + i40e_clear_interrupt_scheme(pf); + kfree(pf->vsi); +err_switch_setup: + i40e_reset_interrupt_capability(pf); + del_timer_sync(&pf->service_timer); +err_mac_addr: +err_configure_lan_hmc: + (void)i40e_shutdown_lan_hmc(hw); +err_init_lan_hmc: + kfree(pf->qp_pile); +err_sw_init: +err_adminq_setup: + (void)i40e_shutdown_adminq(hw); +err_pf_reset: + iounmap(hw->hw_addr); +err_ioremap: + kfree(pf); +err_pf_alloc: + pci_disable_pcie_error_reporting(pdev); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * i40e_remove - Device removal routine + * @pdev: PCI device information struct + * + * i40e_remove is called by the PCI subsystem to alert the driver + * that is should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void i40e_remove(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + i40e_status ret_code; + int i; + + i40e_dbg_pf_exit(pf); + + i40e_ptp_stop(pf); + + /* no more scheduling of any task */ + set_bit(__I40E_DOWN, &pf->state); + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + } + + i40e_fdir_teardown(pf); + + /* If there is a switch structure or any orphans, remove them. + * This will leave only the PF's VSI remaining. + */ + for (i = 0; i < I40E_MAX_VEB; i++) { + if (!pf->veb[i]) + continue; + + if (pf->veb[i]->uplink_seid == pf->mac_seid || + pf->veb[i]->uplink_seid == 0) + i40e_switch_branch_release(pf->veb[i]); + } + + /* Now we can shutdown the PF's VSI, just before we kill + * adminq and hmc. 
+ */ + if (pf->vsi[pf->lan_vsi]) + i40e_vsi_release(pf->vsi[pf->lan_vsi]); + + /* shutdown and destroy the HMC */ + if (pf->hw.hmc.hmc_obj) { + ret_code = i40e_shutdown_lan_hmc(&pf->hw); + if (ret_code) + dev_warn(&pdev->dev, + "Failed to destroy the HMC resources: %d\n", + ret_code); + } + + /* shutdown the adminq */ + ret_code = i40e_shutdown_adminq(&pf->hw); + if (ret_code) + dev_warn(&pdev->dev, + "Failed to destroy the Admin Queue resources: %d\n", + ret_code); + + /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + i40e_clear_interrupt_scheme(pf); + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i]) { + i40e_vsi_clear_rings(pf->vsi[i]); + i40e_vsi_clear(pf->vsi[i]); + pf->vsi[i] = NULL; + } + } + + for (i = 0; i < I40E_MAX_VEB; i++) { + kfree(pf->veb[i]); + pf->veb[i] = NULL; + } + + kfree(pf->qp_pile); + kfree(pf->vsi); + + iounmap(pf->hw.hw_addr); + kfree(pf); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); +} + +/** + * i40e_pci_error_detected - warning that something funky happened in PCI land + * @pdev: PCI device information struct + * + * Called to warn that something happened and the error handling steps + * are in progress. Allows the driver to quiesce things, be ready for + * remediation. + **/ +static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, + enum pci_channel_state error) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "%s: error %d\n", __func__, error); + + /* shutdown all operations */ + if (!test_bit(__I40E_SUSPENDED, &pf->state)) { + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + } + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * i40e_pci_error_slot_reset - a PCI slot reset just happened + * @pdev: PCI device information struct + * + * Called to find if the driver can work with the device now that + * the pci slot has been reset. If a basic connection seems good + * (registers are readable and have sane content) then return a + * happy little PCI_ERS_RESULT_xxx. + **/ +static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + u32 reg; + + dev_info(&pdev->dev, "%s\n", __func__); + if (pci_enable_device_mem(pdev)) { + dev_info(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_wake_from_d3(pdev, false); + + reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); + if (reg == 0) + result = PCI_ERS_RESULT_RECOVERED; + else + result = PCI_ERS_RESULT_DISCONNECT; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_info(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * i40e_pci_error_resume - restart operations after PCI error recovery + * @pdev: PCI device information struct + * + * Called to allow the driver to bring things back up after PCI error + * and/or reset recovery has finished. 
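 *
 * Taken together with the two callbacks above, the AER recovery flow is
 * roughly:
 *
 *   error_detected()  quiesce via i40e_prep_for_reset() under rtnl and
 *                     request a slot reset
 *   slot_reset()      re-enable the device, restore PCI state and report
 *                     PCI_ERS_RESULT_RECOVERED only if I40E_GLGEN_RTRIG
 *                     reads back as zero
 *   resume()          rebuild the device state through
 *                     i40e_handle_reset_warning() under rtnl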
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
+ i40e_handle_reset_warning(pf);
+ rtnl_unlock();
+}
+
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ i40e_clear_interrupt_scheme(pf);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
+
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so
+ * call pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
+static const struct pci_error_handlers i40e_err_handler = {
+ .error_detected = i40e_pci_error_detected,
+ .slot_reset = i40e_pci_error_slot_reset,
+ .resume = i40e_pci_error_resume,
+};
+
+static struct pci_driver i40e_driver = {
+ .name = i40e_driver_name,
+ .id_table = i40e_pci_tbl,
+ .probe = i40e_probe,
+ .remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
+ .err_handler = &i40e_err_handler,
+ .sriov_configure = i40e_pci_sriov_configure,
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/ +static int __init i40e_init_module(void) +{ + pr_info("%s: %s - version %s\n", i40e_driver_name, + i40e_driver_string, i40e_driver_version_str); + pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + + i40e_dbg_init(); + return pci_register_driver(&i40e_driver); +} +module_init(i40e_init_module); + +/** + * i40e_exit_module - Driver exit cleanup routine + * + * i40e_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit i40e_exit_module(void) +{ + pci_unregister_driver(&i40e_driver); + i40e_dbg_exit(); +} +module_exit(i40e_exit_module); diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c new file mode 100644 index 000000000..554e49d02 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -0,0 +1,1006 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_prototype.h" + +/** + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always thru the Shadow RAM. + **/ +i40e_status i40e_init_nvm(struct i40e_hw *hw) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + i40e_status ret_code = 0; + u32 fla, gens; + u8 sr_size; + + /* The SR size is stored regardless of the nvm programming mode + * as the blank mode may be used in the factory line. 
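 *
 * The GLNVM_GENS.SR_SIZE field encodes the Shadow RAM size as a power of
 * two in KB, so the conversion below is
 *
 *   nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
 *
 * e.g. a field value of 6 would give 64 * 512 = 32768 16-bit words,
 * i.e. the 64kB Shadow RAM that the checksum code later in this file
 * assumes.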
+ */ + gens = rd32(hw, I40E_GLNVM_GENS); + sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> + I40E_GLNVM_GENS_SR_SIZE_SHIFT); + /* Switching to words (sr_size contains power of 2KB) */ + nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; + + /* Check if we are in the normal or blank NVM programming mode */ + fla = rd32(hw, I40E_GLNVM_FLA); + if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ + /* Max NVM timeout */ + nvm->timeout = I40E_MAX_NVM_TIMEOUT; + nvm->blank_nvm_mode = false; + } else { /* Blank programming mode */ + nvm->blank_nvm_mode = true; + ret_code = I40E_ERR_NVM_BLANK_MODE; + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); + } + + return ret_code; +} + +/** + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * This function will request NVM ownership for reading + * via the proper Admin Command. + **/ +i40e_status i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access) +{ + i40e_status ret_code = 0; + u64 gtime, timeout; + u64 time_left = 0; + + if (hw->nvm.blank_nvm_mode) + goto i40e_i40e_acquire_nvm_exit; + + ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, + 0, &time_left, NULL); + /* Reading the Global Device Timer */ + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + + /* Store the timeout */ + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; + + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { + /* Poll until the current NVM owner timeouts */ + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { + usleep_range(10000, 20000); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + ret_code = i40e_aq_request_resource(hw, + I40E_NVM_RESOURCE_ID, + access, 0, &time_left, + NULL); + if (!ret_code) { + hw->nvm.hw_semaphore_timeout = + I40E_MS_TO_GTIME(time_left) + gtime; + break; + } + } + if (ret_code) { + hw->nvm.hw_semaphore_timeout = 0; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); + } + } + +i40e_i40e_acquire_nvm_exit: + return ret_code; +} + +/** + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * This function will release NVM resource via the proper Admin Command. + **/ +void i40e_release_nvm(struct i40e_hw *hw) +{ + if (!hw->nvm.blank_nvm_mode) + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); +} + +/** + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure + * + * Polls the SRCTL Shadow RAM register done bit. 
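 *
 * This helper is the polling half of the SRCTL read sequence used by
 * i40e_read_nvm_word_srctl() below, which in outline is:
 *
 *   poll DONE (this helper, 5 usec per attempt);
 *   wr32(hw, I40E_GLNVM_SRCTL,
 *        (offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
 *        (1 << I40E_GLNVM_SRCTL_START_SHIFT));
 *   poll DONE again;
 *   word = (rd32(hw, I40E_GLNVM_SRDATA) & I40E_GLNVM_SRDATA_RDDATA_MASK)
 *          >> I40E_GLNVM_SRDATA_RDDATA_SHIFT;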
+ **/ +static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) +{ + i40e_status ret_code = I40E_ERR_TIMEOUT; + u32 srctl, wait_cnt; + + /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ + for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { + srctl = rd32(hw, I40E_GLNVM_SRCTL); + if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { + ret_code = 0; + break; + } + udelay(5); + } + if (ret_code == I40E_ERR_TIMEOUT) + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); + return ret_code; +} + +/** + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + i40e_status ret_code = I40E_ERR_TIMEOUT; + u32 sr_reg; + + if (offset >= hw->nvm.sr_size) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); + ret_code = I40E_ERR_PARAM; + goto read_nvm_exit; + } + + /* Poll the done bit first */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (!ret_code) { + /* Write the address and start reading */ + sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + (1 << I40E_GLNVM_SRCTL_START_SHIFT); + wr32(hw, I40E_GLNVM_SRCTL, sr_reg); + + /* Poll I40E_GLNVM_SRCTL until the done bit is set */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (!ret_code) { + sr_reg = rd32(hw, I40E_GLNVM_SRDATA); + *data = (u16)((sr_reg & + I40E_GLNVM_SRDATA_RDDATA_MASK) + >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); + } + } + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); + +read_nvm_exit: + return ret_code; +} + +/** + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code = 0; + u16 index, word; + + /* Loop thru the selected region */ + for (word = 0; word < *words; word++) { + index = offset + word; + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); + if (ret_code) + break; + } + + /* Update the number of words read from the Shadow RAM */ + *words = word; + + return ret_code; +} + +/** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 
+ * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** + * i40e_write_nvm_aq - Writes Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + **/ +static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + i40e_status ret_code = I40E_ERR_NVM; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_update_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, NULL); + + return ret_code; +} + +/** + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum + * + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB). 
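 *
 * As a sketch of the arithmetic below, with the skipped words (the
 * checksum word itself, the VPD module and the PCIe ALT module) removed
 * from the sum:
 *
 *   checksum_local = sum of the remaining Shadow RAM words (u16 wrap);
 *   *checksum      = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
 *
 * Validation recomputes this value and compares it against the word
 * stored at I40E_SR_SW_CHECKSUM_WORD.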
+ **/ +static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) +{ + i40e_status ret_code = 0; + struct i40e_virt_mem vmem; + u16 pcie_alt_module = 0; + u16 checksum_local = 0; + u16 vpd_module = 0; + u16 *data; + u16 i = 0; + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; + + /* read pointer to VPD area */ + ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* read pointer to PCIe Alt Auto-load module */ + ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + &pcie_alt_module); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* Calculate SW checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules + */ + for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + + /* Skip Checksum word */ + if (i == I40E_SR_SW_CHECKSUM_WORD) + continue; + /* Skip VPD module (convert byte size to word count) */ + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; + } + /* Skip PCIe ALT module (convert byte size to word count) */ + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; + } + + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; + } + + *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; + +i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); + return ret_code; +} + +/** + * i40e_update_nvm_checksum - Updates the NVM checksum + * @hw: pointer to hardware structure + * + * NVM ownership must be acquired before calling this function and released + * on ARQ completion event reception by caller. + * This function will commit SR to NVM. + **/ +i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u16 checksum; + + ret_code = i40e_calc_nvm_checksum(hw, &checksum); + if (!ret_code) + ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, + 1, &checksum, true); + + return ret_code; +} + +/** + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum + * + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL. + **/ +i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) +{ + i40e_status ret_code = 0; + u16 checksum_sr = 0; + u16 checksum_local = 0; + + ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + if (ret_code) + goto i40e_validate_nvm_checksum_exit; + + /* Do not use i40e_read_nvm_word() because we do not want to take + * the synchronization semaphores twice here. 
+ */ + i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (checksum_local != checksum_sr) + ret_code = I40E_ERR_NVM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum) + *checksum = checksum_local; + +i40e_validate_nvm_checksum_exit: + return ret_code; +} + +static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno); +static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno); +static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static inline u8 i40e_nvmupd_get_module(u32 val) +{ + return (u8)(val & I40E_NVM_MOD_PNT_MASK); +} +static inline u8 i40e_nvmupd_get_transaction(u32 val) +{ + return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); +} + +static char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + "I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", +}; + +/** + * i40e_nvmupd_command - Process an NVM update command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * Dispatches command depending on what update state is current + **/ +i40e_status i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + + /* assume success */ + *errno = 0; + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT: + status = i40e_nvmupd_state_init(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_STATE_READING: + status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_STATE_WRITING: + status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno); + break; + + default: + /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state %d\n", hw->nvmupd_state); + status = I40E_NOT_SUPPORTED; + *errno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_init - Handle NVM update state Init + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * Process legitimate commands of the Init state and conditionally set next + * state. Reject all other commands. 
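 *
 * The command word decoded by the helpers above (module pointer in the
 * low bits of cmd->config, transaction type via I40E_NVM_TRANS_SHIFT)
 * drives a small state machine; roughly:
 *
 *   INIT --READ_SNT-->  READING --READ_CON-->  READING --READ_LCB-->  INIT
 *   INIT --WRITE_SNT--> WRITING --WRITE_CON--> WRITING --WRITE_LCB--> INIT
 *   INIT --READ_SA / WRITE_SA / WRITE_ERA / CSUM_SA--> INIT
 *
 * The single-shot commands acquire the NVM semaphore, do the work and
 * either release it immediately or arm nvm_release_on_done so it is
 * released on the ARQ completion event.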
+ **/ +static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status = 0; + enum i40e_nvmupd_cmd upd_cmd; + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + i40e_release_nvm(hw); + } + break; + + case I40E_NVMUPD_READ_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + } + break; + + case I40E_NVMUPD_WRITE_ERA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_erase(hw, cmd, errno); + if (status) + i40e_release_nvm(hw); + else + hw->aq.nvm_release_on_done = true; + } + break; + + case I40E_NVMUPD_WRITE_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + if (status) + i40e_release_nvm(hw); + else + hw->aq.nvm_release_on_done = true; + } + break; + + case I40E_NVMUPD_WRITE_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + } + break; + + case I40E_NVMUPD_CSUM_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_update_nvm_checksum(hw); + if (status) { + *errno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + i40e_release_nvm(hw); + } else { + hw->aq.nvm_release_on_done = true; + } + } + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_ERR_NVM; + *errno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_reading - Handle NVM update state Reading + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands. 
+ **/ +static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + enum i40e_nvmupd_cmd upd_cmd; + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + case I40E_NVMUPD_READ_CON: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_READ_LCB: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + i40e_release_nvm(hw); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *errno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_writing - Handle NVM update state Writing + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands + **/ +static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + +retry: + switch (upd_cmd) { + case I40E_NVMUPD_WRITE_CON: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_WRITE_LCB: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + if (!status) + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_CSUM_CON: + status = i40e_update_nvm_checksum(hw); + if (status) { + *errno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } + break; + + case I40E_NVMUPD_CSUM_LCB: + status = i40e_update_nvm_checksum(hw); + if (status) + *errno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + else + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *errno = -ESRCH; + break; + } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. 
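 *
 * Concretely, the check below only retries once the semaphore deadline
 * recorded at acquire time has passed:
 *
 *   gtime = rd32(hw, I40E_GLVFGEN_TIMER);
 *   if (gtime >= hw->nvm.hw_semaphore_timeout)
 *           release, re-acquire for write and jump back to "retry";
 *
 * restoring the saved status and asq_last_status if the re-acquire fails.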
+ */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + i40e_status old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + + return status; +} + +/** + * i40e_nvmupd_validate_command - Validate given command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @errno: pointer to return error code + * + * Return one of the valid command types or I40E_NVMUPD_INVALID + **/ +static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno) +{ + enum i40e_nvmupd_cmd upd_cmd; + u8 transaction; + + /* anything that doesn't match a recognized case is an error */ + upd_cmd = I40E_NVMUPD_INVALID; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + + /* limits on data size */ + if ((cmd->data_size < 1) || + (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); + *errno = -EFAULT; + return I40E_NVMUPD_INVALID; + } + + switch (cmd->command) { + case I40E_NVM_READ: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_READ_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_READ_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_READ_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_READ_SA; + break; + } + break; + + case I40E_NVM_WRITE: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_WRITE_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_WRITE_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_WRITE_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_WRITE_SA; + break; + case I40E_NVM_ERA: + upd_cmd = I40E_NVMUPD_WRITE_ERA; + break; + case I40E_NVM_CSUM: + upd_cmd = I40E_NVMUPD_CSUM_CON; + break; + case (I40E_NVM_CSUM|I40E_NVM_SA): + upd_cmd = I40E_NVMUPD_CSUM_SA; + break; + case (I40E_NVM_CSUM|I40E_NVM_LCB): + upd_cmd = I40E_NVMUPD_CSUM_LCB; + break; + } + break; + } + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); + + if (upd_cmd == I40E_NVMUPD_INVALID) { + *errno = -EFAULT; + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *errno); + } + return upd_cmd; +} + +/** + * i40e_nvmupd_nvm_read - Read NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction == I40E_NVM_LCB) || 
(transaction == I40E_NVM_SA); + + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + bytes, last, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_erase - Erase an NVM module + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @errno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno) +{ + i40e_status status = 0; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + last, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_write - Write NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status = 0; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + status = i40e_aq_update_nvm(hw, module, cmd->offset, + (u16)cmd->data_size, bytes, last, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h new file mode 100644 index 000000000..ad802dd0f --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -0,0 +1,84 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_OSDEP_H_ +#define _I40E_OSDEP_H_ + +#include +#include +#include +#include +#include +#include + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include + +/* File to be the magic between shared code and + * actual OS primitives + */ + +#define hw_dbg(hw, S, A...) do {} while (0) + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40e_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +} __packed; + +#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) + +#define i40e_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("i40e %02x.%x " s, \ + (h)->bus.device, (h)->bus.func, \ + ##__VA_ARGS__); \ +} while (0) + +typedef enum i40e_status_code i40e_status; +#ifdef CONFIG_I40E_FCOE +#define I40E_FCOE +#endif +#endif /* _I40E_OSDEP_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h new file mode 100644 index 000000000..7b34f1e66 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -0,0 +1,311 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include "i40e_virtchnl.h" + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. 
These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +i40e_status i40e_init_adminq(struct i40e_hw *hw); +i40e_status i40e_shutdown_adminq(struct i40e_hw *hw); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +i40e_status i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +i40e_status i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); + +/* debug function for adminq */ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +bool i40e_check_asq_alive(struct i40e_hw *hw); +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); + +u32 i40e_led_get(struct i40e_hw *hw); +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); + +/* admin send queue commands */ + +i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_reset); +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 vsi_id, bool set_filter, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, bool enable_l2_filtering, + u16 *pveb_seid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, bool *floating, + u16 *statistic_index, u16 *vebs_used, + u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile profile, + u8 pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); +/* i40e_common */ +i40e_status i40e_init_shared_code(struct i40e_hw *hw); +i40e_status i40e_pf_reset(struct i40e_hw *hw); +void i40e_clear_hw(struct i40e_hw *hw); +void i40e_clear_pxe_mode(struct i40e_hw *hw); +bool i40e_get_link_status(struct i40e_hw *hw); +i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size); +i40e_status i40e_validate_mac_addr(u8 *mac_addr); +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); +#ifdef I40E_FCOE +i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +#endif +/* prototype for functions used for NVM access */ +i40e_status i40e_init_nvm(struct i40e_hw *hw); +i40e_status i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access); 
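[The NVM prototypes around this point imply a guarded access discipline: a caller is expected to take the NVM ownership semaphore with i40e_acquire_nvm() before touching the shadow RAM, perform its reads, and drop the semaphore with i40e_release_nvm() on every exit path, including error paths. The fragment below is only an illustrative, self-contained user-space sketch of that acquire/read/release flow; the sketch_* types and helpers are invented stand-ins, not the driver's real structures, and the actual implementations live in i40e_nvm.c and operate on hardware registers.]

/* Illustrative sketch only: sketch_hw, the status values and the helper
 * bodies below are simplified stand-ins invented for this example so it
 * compiles on its own.  What it shows is the calling discipline implied by
 * the prototypes above: acquire the NVM semaphore, do the word reads, and
 * release the semaphore on both the success and the error path.
 */
#include <stdint.h>
#include <stdio.h>

enum sketch_status { SKETCH_OK = 0, SKETCH_ERR_NVM = 1 };
enum sketch_nvm_access { SKETCH_NVM_READ, SKETCH_NVM_WRITE };

struct sketch_hw {
	int nvm_sem_taken;		/* models the ownership semaphore   */
	uint16_t shadow_ram[64];	/* models a few words of shadow RAM */
};

/* Stand-in for i40e_acquire_nvm(): take the ownership semaphore. */
static enum sketch_status sketch_acquire_nvm(struct sketch_hw *hw,
					     enum sketch_nvm_access access)
{
	(void)access;
	if (hw->nvm_sem_taken)
		return SKETCH_ERR_NVM;	/* already owned elsewhere */
	hw->nvm_sem_taken = 1;
	return SKETCH_OK;
}

/* Stand-in for i40e_release_nvm(): drop the ownership semaphore. */
static void sketch_release_nvm(struct sketch_hw *hw)
{
	hw->nvm_sem_taken = 0;
}

/* Stand-in for i40e_read_nvm_word(): one 16-bit word at a time. */
static enum sketch_status sketch_read_nvm_word(struct sketch_hw *hw,
					       uint16_t offset, uint16_t *data)
{
	if (offset >= 64)
		return SKETCH_ERR_NVM;
	*data = hw->shadow_ram[offset];
	return SKETCH_OK;
}

/* The flow the prototypes imply: acquire, read, release on every path. */
static enum sketch_status sketch_dump_nvm(struct sketch_hw *hw,
					  uint16_t start, uint16_t count)
{
	enum sketch_status ret;
	uint16_t word, i;

	ret = sketch_acquire_nvm(hw, SKETCH_NVM_READ);
	if (ret != SKETCH_OK)
		return ret;		/* nothing acquired, nothing to release */

	for (i = 0; i < count; i++) {
		ret = sketch_read_nvm_word(hw, start + i, &word);
		if (ret != SKETCH_OK)
			break;		/* still fall through to the release */
		printf("word 0x%04x = 0x%04x\n", (unsigned)(start + i), word);
	}

	sketch_release_nvm(hw);		/* released on success and on error */
	return ret;
}

int main(void)
{
	struct sketch_hw hw = { .shadow_ram = { [0] = 0x1040, [1] = 0xcafe } };

	return sketch_dump_nvm(&hw, 0, 2) == SKETCH_OK ? 0 : 1;
}

[End of illustrative sketch; the patch continues with the remaining NVM and VF prototypes.]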
+void i40e_release_nvm(struct i40e_hw *hw); +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); +i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum); +i40e_status i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *); +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); + +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40e_ptype_lookup[ptype]; +} + +/* prototype for functions used for SW locks */ + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg); +i40e_status i40e_vf_reset(struct i40e_hw *hw); +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c new file mode 100644 index 000000000..a92b7725d --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -0,0 +1,728 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" +#include + +/* The XL710 timesync is very much like Intel's 82599 design when it comes to + * the fundamental clock design. However, the clock operations are much simpler + * in the XL710 because the device supports a full 64 bits of nanoseconds. + * Because the field is so wide, we can forgo the cycle counter and just + * operate with the nanosecond field directly without fear of overflow. 
+ * + * Much like the 82599, the update period is dependent upon the link speed: + * At 40Gb link or no link, the period is 1.6ns. + * At 10Gb link, the period is multiplied by 2. (3.2ns) + * At 1Gb link, the period is multiplied by 20. (32ns) + * 1588 functionality is not supported at 100Mbps. + */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL 0x2000000000ULL + +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) + +/** + * i40e_ptp_read - Read the PHC time from the device + * @pf: Board private structure + * @ts: timespec structure to hold the current time value + * + * This function reads the PRTTSYN_TIME registers and stores them in a + * timespec. However, since the registers are 64 bits of nanoseconds, we must + * convert the result to a timespec before we can return. + **/ +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) +{ + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + /* The timer latches on the lowest register read. */ + lo = rd32(hw, I40E_PRTTSYN_TIME_L); + hi = rd32(hw, I40E_PRTTSYN_TIME_H); + + ns = (((u64)hi) << 32) | lo; + + *ts = ns_to_timespec64(ns); +} + +/** + * i40e_ptp_write - Write the PHC time to the device + * @pf: Board private structure + * @ts: timespec structure that holds the new time value + * + * This function writes the PRTTSYN_TIME registers with the user value. Since + * we receive a timespec from the stack, we must convert that timespec into + * nanoseconds before programming the registers. + **/ +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) +{ + struct i40e_hw *hw = &pf->hw; + u64 ns = timespec64_to_ns(ts); + + /* The timer will not update until the high register is written, so + * write the low register first. + */ + wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); +} + +/** + * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time + * @hwtstamps: Timestamp structure to update + * @timestamp: Timestamp from the hardware + * + * We need to convert the NIC clock value into a hwtstamp which can be used by + * the upper level timestamping functions. Since the timestamp is simply a 64- + * bit nanosecond value, we can call ns_to_ktime directly to handle this. + **/ +static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps, + u64 timestamp) +{ + memset(hwtstamps, 0, sizeof(*hwtstamps)); + + hwtstamps->hwtstamp = ns_to_ktime(timestamp); +} + +/** + * i40e_ptp_adjfreq - Adjust the PHC frequency + * @ptp: The PTP clock structure + * @ppb: Parts per billion adjustment from the base + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct i40e_hw *hw = &pf->hw; + u64 adj, freq, diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); /* Force any pending update before accessing. 
*/ + adj = ACCESS_ONCE(pf->ptp_base_adj); + + freq = adj; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + if (neg_adj) + adj -= diff; + else + adj += diff; + + wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); + + return 0; +} + +/** + * i40e_ptp_adjtime - Adjust the PHC time + * @ptp: The PTP clock structure + * @delta: Offset in nanoseconds to adjust the PHC time by + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + + i40e_ptp_read(pf, &now); + now = timespec64_add(now, then); + i40e_ptp_write(pf, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_gettime - Get the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure to hold the current time value + * + * Read the device clock and return the correct value on ns, after converting it + * into a timespec struct. + **/ +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_read(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_settime - Set the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + **/ +static int i40e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_write(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem + * @ptp: The PTP clock structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * The XL710 does not support any of the ancillary features of the PHC + * subsystem, so this function may just return. + **/ +static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung + * @vsi: The VSI with the rings relevant to 1588 + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_ring *rx_ring; + unsigned long rx_event; + u32 prttsyn_stat; + int n; + + /* Since we cannot turn off the Rx timestamp logic if the device is + * configured for Tx timestamping, we check if Rx timestamping is + * configured. We don't want to spuriously warn about Rx timestamp + * hangs if we don't care about the timestamps. 
+ */ + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + return; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + /* Unless all four receive timestamp registers are latched, we are not + * concerned about a possible PTP Rx hang, so just update the timeout + * counter and exit. + */ + if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK << + I40E_PRTTSYN_STAT_1_RXT0_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT1_MASK << + I40E_PRTTSYN_STAT_1_RXT1_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT2_MASK << + I40E_PRTTSYN_STAT_1_RXT2_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT3_MASK << + I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) { + pf->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event. */ + rx_event = pf->last_rx_ptp_check; + for (n = 0; n < vsi->num_queue_pairs; n++) { + rx_ring = vsi->rx_rings[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + pf->last_rx_ptp_check = jiffies; + pf->rx_hwtstamp_cleared++; + dev_warn(&vsi->back->pdev->dev, + "%s: clearing Rx timestamp hang\n", + __func__); + } +} + +/** + * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp + * @pf: Board private structure + * + * Read the value of the Tx timestamp from the registers, convert it into a + * value consumable by the stack, and store that result into the shhwtstamps + * struct before returning it up the stack. + **/ +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + return; + + /* don't attempt to timestamp if we don't have an skb */ + if (!pf->ptp_tx_skb) + return; + + lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); + hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns); + skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); +} + +/** + * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp + * @pf: Board private structure + * @skb: Particular skb to send timestamp with + * @index: Index into the receive timestamp registers for the timestamp + * + * The XL710 receives a notification in the receive descriptor with an offset + * into the set of RXTIME registers where the timestamp is for that skb. This + * function goes and fetches the receive timestamp from that offset, if a valid + * one exists. The RXTIME registers are in ns, so we must convert the result + * first. + **/ +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) +{ + u32 prttsyn_stat, hi, lo; + struct i40e_hw *hw; + u64 ns; + + /* Since we cannot turn off the Rx timestamp logic if the device is + * doing Tx timestamping, check if Rx timestamping is configured. 
+ */ + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + return; + + hw = &pf->hw; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + if (!(prttsyn_stat & (1 << index))) + return; + + lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); + hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); +} + +/** + * i40e_ptp_set_increment - Utility function to update clock increment rate + * @pf: Board private structure + * + * During a link change, the DMA frequency that drives the 1588 logic will + * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds, + * we must update the increment value per clock tick. + **/ +void i40e_ptp_set_increment(struct i40e_pf *pf) +{ + struct i40e_link_status *hw_link_info; + struct i40e_hw *hw = &pf->hw; + u64 incval; + + hw_link_info = &hw->phy.link_info; + + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + + switch (hw_link_info->link_speed) { + case I40E_LINK_SPEED_10GB: + incval = I40E_PTP_10GB_INCVAL; + break; + case I40E_LINK_SPEED_1GB: + incval = I40E_PTP_1GB_INCVAL; + break; + case I40E_LINK_SPEED_100MB: + { + static int warn_once; + + if (!warn_once) { + dev_warn(&pf->pdev->dev, + "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); + warn_once++; + } + incval = 0; + break; + } + case I40E_LINK_SPEED_40GB: + default: + incval = I40E_PTP_40GB_INCVAL; + break; + } + + /* Write the new increment value into the increment register. The + * hardware will not update the clock until both registers have been + * written. + */ + wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + + /* Update the base adjustement value. */ + ACCESS_ONCE(pf->ptp_base_adj) = incval; + smp_mb(); /* Force the above update. */ +} + +/** + * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Obtain the current hardware timestamping settigs as requested. To do this, + * keep a shadow copy of the timestamp settings rather than attempting to + * deconstruct it from the registers. + **/ +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode + * @pf: Board private structure + * @config: hwtstamp settings requested or saved + * + * Control hardware registers to enter the specific mode requested by the + * user. Also used during reset path to ensure that timestamp settings are + * maintained. + * + * Note: modifies config in place, and may update the requested mode to be + * more broad if the specific filter is not directly supported. + **/ +static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, + struct hwtstamp_config *config) +{ + struct i40e_hw *hw = &pf->hw; + u32 tsyntype, regval; + + /* Reserved for future extensions. */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + pf->ptp_tx = false; + break; + case HWTSTAMP_TX_ON: + pf->ptp_tx = true; + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pf->ptp_rx = false; + /* We set the type to V1, but do not enable UDP packet + * recognition. 
In this way, we should be as close to + * disabling PTP Rx timestamps as possible since V1 packets + * are always UDP, since L2 packets are a V2 feature. + */ + tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V1 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V2 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_ALL: + default: + return -ERANGE; + } + + /* Clear out all 1588-related registers to clear and unlatch them. */ + rd32(hw, I40E_PRTTSYN_STAT_0); + rd32(hw, I40E_PRTTSYN_TXTIME_H); + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + + /* Enable/disable the Tx timestamp interrupt based on user input. */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + if (pf->ptp_tx) + regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + else + regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + regval = rd32(hw, I40E_PFINT_ICR0_ENA); + if (pf->ptp_tx) + regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + else + regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, regval); + + /* Although there is no simple on/off switch for Rx, we "disable" Rx + * timestamps by setting to V1 only mode and clear the UDP + * recognition. This ought to disable all PTP Rx timestamps as V1 + * packets are always over UDP. Note that software is configured to + * ignore Rx timestamps via the pf->ptp_rx flag. + */ + regval = rd32(hw, I40E_PRTTSYN_CTL1); + /* clear everything but the enable bit */ + regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + /* now enable bits for desired Rx timestamps */ + regval |= tsyntype; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + + return 0; +} + +/** + * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Respond to the user filter requests and make the appropriate hardware + * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping + * logic, so keep track in software of whether to indicate these timestamps + * or not. + * + * It is permissible to "upgrade" the user request to a broader filter, as long + * as the user receives the timestamps they care about and the user is notified + * the filter has been broadened. 
+ **/ +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = i40e_ptp_set_timestamp_mode(pf, &config); + if (err) + return err; + + /* save these settings for future reference */ + pf->tstamp_config = config; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_create_clock - Create PTP clock device for userspace + * @pf: Board private structure + * + * This function creates a new PTP clock device. It only creates one if we + * don't already have one, so it is safe to call. Will return error if it + * can't create one, but success if we already have a device. Should be used + * by i40e_ptp_init to create clock initially, and prevent global resets from + * creating new clock devices. + **/ +static long i40e_ptp_create_clock(struct i40e_pf *pf) +{ + /* no need to create a clock device if we already have one */ + if (!IS_ERR_OR_NULL(pf->ptp_clock)) + return 0; + + strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + pf->ptp_caps.owner = THIS_MODULE; + pf->ptp_caps.max_adj = 999999999; + pf->ptp_caps.n_ext_ts = 0; + pf->ptp_caps.pps = 0; + pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; + pf->ptp_caps.adjtime = i40e_ptp_adjtime; + pf->ptp_caps.gettime64 = i40e_ptp_gettime; + pf->ptp_caps.settime64 = i40e_ptp_settime; + pf->ptp_caps.enable = i40e_ptp_feature_enable; + + /* Attempt to register the clock before enabling the hardware. */ + pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); + if (IS_ERR(pf->ptp_clock)) { + return PTR_ERR(pf->ptp_clock); + } + + /* clear the hwtstamp settings here during clock create, instead of + * during regular init, so that we can maintain settings across a + * reset or suspend. + */ + pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * i40e_ptp_init - Initialize the 1588 support after device probe or reset + * @pf: Board private structure + * + * This function sets device up for 1588 support. The first time it is run, it + * will create a PHC clock device. It does not create a clock device if one + * already exists. It also reconfigures the device after a reset. + **/ +void i40e_ptp_init(struct i40e_pf *pf) +{ + struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + struct i40e_hw *hw = &pf->hw; + u32 pf_id; + long err; + + /* Only one PF is assigned to control 1588 logic per port. Do not + * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID + */ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) { + pf->flags &= ~I40E_FLAG_PTP; + dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", + __func__, + netdev->name); + return; + } + + /* we have to initialize the lock first, since we can't control + * when the user will enter the PHC device entry points + */ + spin_lock_init(&pf->tmreg_lock); + + /* ensure we have a clock device */ + err = i40e_ptp_create_clock(pf); + if (err) { + pf->ptp_clock = NULL; + dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", + __func__); + } else { + struct timespec64 ts; + u32 regval; + + dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, + netdev->name); + pf->flags |= I40E_FLAG_PTP; + + /* Ensure the clocks are running. 
*/ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + regval = rd32(hw, I40E_PRTTSYN_CTL1); + regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + + /* Set the increment value per clock tick. */ + i40e_ptp_set_increment(pf); + + /* reset timestamping mode */ + i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); + + /* Set the clock value. */ + ts = ktime_to_timespec64(ktime_get_real()); + i40e_ptp_settime(&pf->ptp_caps, &ts); + } +} + +/** + * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC + * @pf: Board private structure + * + * This function handles the cleanup work required from the initialization by + * clearing out the important information and unregistering the PHC. + **/ +void i40e_ptp_stop(struct i40e_pf *pf) +{ + pf->flags &= ~I40E_FLAG_PTP; + pf->ptp_tx = false; + pf->ptp_rx = false; + + if (pf->ptp_tx_skb) { + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); + } + + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, + pf->vsi[pf->lan_vsi]->netdev->name); + } +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h new file mode 100644 index 000000000..522d6df51 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -0,0 +1,3369 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + +#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) +#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) +#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ +#define I40E_GL_ARQH_ARQH_SHIFT 0 +#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) +#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ +#define I40E_GL_ARQT_ARQT_SHIFT 0 +#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) +#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ +#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) +#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ +#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) +#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ +#define I40E_GL_ATQH_ATQH_SHIFT 0 +#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) +#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ +#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) +#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) +#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) +#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) +#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) +#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ +#define I40E_GL_ATQT_ATQT_SHIFT 0 +#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) +#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ +#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) +#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ +#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) +#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ +#define I40E_PF_ARQH_ARQH_SHIFT 0 +#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) +#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ +#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) +#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) +#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ +#define I40E_PF_ARQT_ARQT_SHIFT 0 +#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, 
I40E_PF_ARQT_ARQT_SHIFT) +#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ +#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) +#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ +#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) +#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ +#define I40E_PF_ATQH_ATQH_SHIFT 0 +#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) +#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ +#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) +#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) +#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ +#define I40E_PF_ATQT_ATQT_SHIFT 0 +#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) +#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAH_MAX_INDEX 127 +#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAL_MAX_INDEX 127 +#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) +#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQH_MAX_INDEX 127 +#define I40E_VF_ARQH_ARQH_SHIFT 0 +#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) +#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQLEN_MAX_INDEX 127 +#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQT_MAX_INDEX 127 +#define I40E_VF_ARQT_ARQT_SHIFT 0 +#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) +#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAH_MAX_INDEX 127 +#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAL_MAX_INDEX 127 +#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL_ATQBAL_MASK 
I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) +#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQH_MAX_INDEX 127 +#define I40E_VF_ATQH_ATQH_SHIFT 0 +#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) +#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQLEN_MAX_INDEX 127 +#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQT_MAX_INDEX 127 +#define I40E_VF_ATQT_ATQT_SHIFT 0 +#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) +#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ +#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 +#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) +#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 +#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) +#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 +#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) +#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ +#define 
I40E_PFCM_LANCTXDATA_MAX_INDEX 3 +#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 +#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) +#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) +#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) +#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ +#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 +#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) +#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) +#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 +#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) +#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) +#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 +#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 +#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) +#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ +#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 +#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) +#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 +#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) +#define 
I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 +#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 +#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) +#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 +#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) +#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ +#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 +#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) +#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ +#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 +#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) +#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 +#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 +#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) +#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 +#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 +#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) +#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ +#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 +#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) +#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 +#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) +#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 +#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 +#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ +#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 +#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 +#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) +#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ +#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 +#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) +#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ +#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 +#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 +#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 +#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 +#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 +#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 +#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 +#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) +#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ +#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ +#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ +#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 +#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) +#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) +#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) +#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ +#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 +#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 +#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 +#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) +#define 
I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 +#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 +#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 +#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 +#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 +#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 +#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) +#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) +#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ +#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 +#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) +#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 +#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) +#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 +#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) +#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 +#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) +#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ +#define I40E_GL_FWSTS_FWS0B_SHIFT 0 +#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) +#define I40E_GL_FWSTS_FWRI_SHIFT 9 +#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) +#define I40E_GL_FWSTS_FWS1B_SHIFT 16 +#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ +#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 +#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) +#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ +#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK 
I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 +#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 +#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) +#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) +#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 +#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 +#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) +#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 +#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) +#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 +#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) +#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 +#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) +#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 +#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) +#define I40E_GLGEN_I2CCMD_R_SHIFT 29 +#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) +#define I40E_GLGEN_I2CCMD_E_SHIFT 31 +#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) +#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, 
I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 +#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 +#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 +#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 +#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) +#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) +#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, 
I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) +#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSCA_MAX_INDEX 3 +#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 +#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) +#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 +#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) +#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 +#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) +#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 +#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSRWD_MAX_INDEX 3 +#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 +#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) +#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 +#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) +#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ +#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 +#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) +#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 +#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) +#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 +#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) +#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 +#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 +#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 +#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) +#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ +#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 +#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) +#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ +#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 +#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) +#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 +#define 
I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) +#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 +#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ +#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 +#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) +#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 +#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) +#define I40E_GLGEN_STAT_VTEN_SHIFT 3 +#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) +#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 +#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) +#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 +#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) +#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 +#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) +#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 +#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 +#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) +#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ +#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 +#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) +#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ +#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 +#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) +#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ +#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 +#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) +#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ +#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ +#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 +#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) +#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 +#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) +#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 +#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) +#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 +#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) +#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ +#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 +#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) +#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ +#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 +#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, 
I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) +#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 +#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) +#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 +#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 +#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) +#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 +#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 +#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) +#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 +#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 +#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) +#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 +#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 +#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) +#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) +#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) +#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) +#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) +#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) +#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) +#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define 
I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) +#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) +#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) +#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) +#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) +#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) +#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ +#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) +#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) +#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) +#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) +#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) +#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 +#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) +#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* 
_i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) +#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) +#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) +#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_SDPART_MAX_INDEX 15 +#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) +#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) +#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ +#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) +#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) 
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) +#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) +#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 +#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) +#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 +#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) +#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, 
I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) +#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ +#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_PFINT_CEQCTL_MAX_INDEX 511 +#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) +#define 
I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ +#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, 
I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define 
I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ +#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 +#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 +#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 +#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 +#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) +#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) +#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) +#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_SWINT_SHIFT 31 +#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT) +#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ +#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) +#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ +#define I40E_PFINT_ITR0_MAX_INDEX 2 +#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) +#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_ITRN_MAX_INDEX 2 +#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) +#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ +#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) +#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ 
+#define I40E_PFINT_RATEN_MAX_INDEX 511 +#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) +#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_RQCTL_MAX_INDEX 1535 +#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) +#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_TQCTL_MAX_INDEX 1535 +#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) +#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 +#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define 
I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_MAX_INDEX 127 +#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_SWINT_SHIFT 31 +#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ +#define 
I40E_VFINT_ITR0_MAX_INDEX 2 +#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN_MAX_INDEX 2 +#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPINT_AEQCTL_MAX_INDEX 127 +#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_VPINT_CEQCTL_MAX_INDEX 511 +#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLST0_MAX_INDEX 127 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, 
I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_RATE0_MAX_INDEX 127 +#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) +#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_RATEN_MAX_INDEX 511 +#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) +#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) +#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 +#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) +#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ +#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 +#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) +#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) +#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) +#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ +#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ +#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 +#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) +#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 +#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QRX_ENA_MAX_INDEX 1535 +#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) +#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) +#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 +#define 
I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) +#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QRX_TAIL_MAX_INDEX 1535 +#define I40E_QRX_TAIL_TAIL_SHIFT 0 +#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) +#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_CTL_MAX_INDEX 1535 +#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 +#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) +#define I40E_QTX_CTL_PF_INDX_SHIFT 2 +#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) +#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 +#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) +#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_ENA_MAX_INDEX 1535 +#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) +#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) +#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) +#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_HEAD_MAX_INDEX 1535 +#define I40E_QTX_HEAD_HEAD_SHIFT 0 +#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) +#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 +#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) +#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_TAIL_MAX_INDEX 1535 +#define I40E_QTX_TAIL_TAIL_SHIFT 0 +#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) +#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_MAPENA_MAX_INDEX 127 +#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 +#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) +#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QTABLE_MAX_INDEX 15 +#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 +#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) +#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QBASE_MAX_INDEX 383 +#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 +#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) +#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 +#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 +#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) +#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 +#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) +#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ +#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 +#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) +#define I40E_PRTGL_SAH_MFS_SHIFT 16 +#define 
I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) +#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ +#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 +#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) +#define 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, 
I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) +#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ +#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 +#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) +#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) +#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) +#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) +#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* 
_i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 +#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 +#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) +#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 +#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 +#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 +#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 +#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) +#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 +#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 +#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) +#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 +#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 +#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) +#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_METF_MAX_INDEX 3 +#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 +#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) +#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 +#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) +#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) +#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 +#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) +#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) +#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 +#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) +#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 +#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) +#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 +#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) +#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 +#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) +#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) +#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) +#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ +#define I40E_MSIX_PBA_MAX_INDEX 5 +#define I40E_MSIX_PBA_PENBIT_SHIFT 0 +#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) +#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TADD_MAX_INDEX 128 +#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) +#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TMSG_MAX_INDEX 128 +#define 
I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TUADD_MAX_INDEX 128 +#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TVCTRL_MAX_INDEX 128 +#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 +#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) +#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 +#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) +#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 +#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) +#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 +#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) +#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 +#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) +#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 +#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) +#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 +#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) +#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 +#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) +#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 +#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) +#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ +#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 +#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, 
I40E_GLNVM_FLASHID_FLASHID_SHIFT) +#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 +#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) +#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ +#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 +#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) +#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 +#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) +#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 +#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) +#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 +#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) +#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 +#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) +#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ +#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) +#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ +#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 +#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) +#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 +#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) +#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 +#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) +#define I40E_GLNVM_SRCTL_START_SHIFT 30 +#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ +#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 +#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) +#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 +#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) +#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ +#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 +#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) +#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ +#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 +#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) +#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 +#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 +#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 +#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 +#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 +#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 +#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 +#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 +#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) +#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ +#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 +#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) +#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 +#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) +#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ +#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 +#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 +#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 +#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, 
I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) +#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) +#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) +#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 
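An illustrative aside, not part of the patch hunk itself: every field in this register map follows the same SHIFT/MASK convention, and the parameterized macros such as I40E_GLPCI_GSCL_5_8(_i) expand to plain byte offsets into the device's BAR0 register space. The sketch below shows that pattern under two stated assumptions: I40E_MASK is restated from the top of i40e_register.h (it is not visible in this hunk), and the helpers i40e_set_field()/i40e_get_field() are names invented here for illustration, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* I40E_MASK comes from the top of i40e_register.h (not visible in this
 * hunk); restated with extra parentheses so the sketch compiles alone. */
#define I40E_MASK(mask, shift) ((mask) << (shift))

/* Restated from the definitions just above. */
#define I40E_GLPCI_GSCL_2                       0x0009C490
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK \
        I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4))

/* Place a value behind a field's SHIFT/MASK pair. */
static uint32_t i40e_set_field(uint32_t val, uint32_t mask, uint32_t shift)
{
        return (val << shift) & mask;
}

/* Pull a field back out of a raw register word. */
static uint32_t i40e_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        /* Hypothetical value: select event number 0x2A on counter 0. */
        uint32_t gscl_2 = i40e_set_field(0x2A,
                                         I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK,
                                         I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT);

        printf("GSCL_2 word to write at 0x%08x: 0x%08x\n",
               (unsigned)I40E_GLPCI_GSCL_2, (unsigned)gscl_2);
        printf("event 0 read back: 0x%02x\n",
               (unsigned)i40e_get_field(gscl_2,
                                        I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK,
                                        I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT));

        /* Indexed macros are plain offset arithmetic: entry 2 of GSCL_5_8
         * sits at 0x0009C494 + 2 * 4 = 0x0009C49C in BAR0. */
        printf("GSCL_5_8(2) offset: 0x%08x\n", (unsigned)I40E_GLPCI_GSCL_5_8(2));
        return 0;
}

In the driver proper such values are moved to and from the hardware through its own 32-bit register accessors; only the macro arithmetic above is taken from this header.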
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) +#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 +#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) +#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 +#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) +#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 +#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) +#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) +#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ +#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, 
I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 +#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) +#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 +#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) +#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ +#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 +#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 +#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 +#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 +#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) +#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ +#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 +#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) +#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ +#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 +#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) +#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ +#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 +#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) +#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) +#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ +#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 +#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) +#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ +#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 +#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, 
I40E_GLPCI_VENDORID_VENDORID_SHIFT) +#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ +#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) +#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 +#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) +#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ +#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 +#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) +#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 +#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) +#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ +#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 +#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) +#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ +#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) +#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 +#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) +#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 +#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) +#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ +#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 +#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) +#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 +#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) +#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 +#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) +#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 +#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) +#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ +#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 +#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) +#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 +#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) +#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) +#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ +#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) +#define 
I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) +#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) +#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) +#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ +#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) +#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ +#define I40E_PFPCI_PM_PME_EN_SHIFT 0 +#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) +#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ +#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 +#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ +#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 +#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) +#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ +#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 +#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) +#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEEC 
0x001E4380 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) +#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 +#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) +#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) +#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ +#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 +#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) +#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 +#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) +#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ +#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 +#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) +#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ +#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 +#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) +#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 +#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) +#define I40E_PRTPM_GC_RATD_SHIFT 2 +#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) +#define I40E_PRTPM_GC_LCDMP_SHIFT 3 +#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) +#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 +#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) +#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ +#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 +#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) +#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ +#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 +#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ +#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 +#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) +#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ +#define I40E_GLRPB_GHW_GHW_SHIFT 0 +#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) +#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ +#define I40E_GLRPB_GLW_GLW_SHIFT 0 +#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) +#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ +#define I40E_GLRPB_PHW_PHW_SHIFT 0 +#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) +#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ +#define I40E_GLRPB_PLW_PLW_SHIFT 0 +#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) +#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DHW_MAX_INDEX 7 +#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 +#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) +#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DLW_MAX_INDEX 7 +#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 +#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, 
I40E_PRTRPB_DLW_DLW_TCN_SHIFT) +#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DPS_MAX_INDEX 7 +#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 +#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) +#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SHT_MAX_INDEX 7 +#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 +#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) +#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ +#define I40E_PRTRPB_SHW_SHW_SHIFT 0 +#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) +#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SLT_MAX_INDEX 7 +#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 +#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) +#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ +#define I40E_PRTRPB_SLW_SLW_SHIFT 0 +#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) +#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ +#define I40E_PRTRPB_SPS_SPS_SHIFT 0 +#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) +#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ +#define I40E_GLQF_CTL_HTOEP_SHIFT 1 +#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) +#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 +#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) +#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 +#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) +#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 +#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 +#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 +#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) +#define I40E_GLQF_CTL_FDBEST_SHIFT 17 +#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) +#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 +#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) +#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 +#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) +#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 +#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) +#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) +#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 +#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) +#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_GLQF_HKEY_MAX_INDEX 12 +#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 +#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) +#define I40E_GLQF_HKEY_KEY_1_SHIFT 8 +#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) 
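An illustrative aside, not part of the patch hunk itself: I40E_GLQF_HKEY(0..I40E_GLQF_HKEY_MAX_INDEX) holds the receive hash key four bytes per 32-bit register, in the KEY_0..KEY_3 byte lanes defined around this point. The minimal packing sketch below restates those defines so it stands alone; the key-byte-to-lane ordering shown is an assumption made for illustration, not a statement of how the driver orders the bytes.

#include <stdint.h>
#include <stdio.h>

/* Restated from the surrounding definitions so the sketch stands alone. */
#define I40E_GLQF_HKEY_MAX_INDEX   12
#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
#define I40E_GLQF_HKEY_KEY_3_SHIFT 24

/* Pack a (I40E_GLQF_HKEY_MAX_INDEX + 1) * 4 = 52 byte hash key into the
 * 13 register-sized words that would land in I40E_GLQF_HKEY(0..12).
 * The key[4 * i + n] -> KEY_n lane mapping is assumed for this sketch;
 * only the lane shifts themselves come from the header. */
static void pack_hkey(const uint8_t *key,
                      uint32_t regs[I40E_GLQF_HKEY_MAX_INDEX + 1])
{
        int i;

        for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
                regs[i] = ((uint32_t)key[4 * i + 0] << I40E_GLQF_HKEY_KEY_0_SHIFT) |
                          ((uint32_t)key[4 * i + 1] << I40E_GLQF_HKEY_KEY_1_SHIFT) |
                          ((uint32_t)key[4 * i + 2] << I40E_GLQF_HKEY_KEY_2_SHIFT) |
                          ((uint32_t)key[4 * i + 3] << I40E_GLQF_HKEY_KEY_3_SHIFT);
}

int main(void)
{
        uint8_t key[(I40E_GLQF_HKEY_MAX_INDEX + 1) * 4];
        uint32_t regs[I40E_GLQF_HKEY_MAX_INDEX + 1];
        int i;

        /* Dummy key bytes 0x00..0x33, just to show the packing. */
        for (i = 0; i < (int)sizeof(key); i++)
                key[i] = (uint8_t)i;

        pack_hkey(key, regs);
        printf("HKEY(0) word: 0x%08x\n", (unsigned)regs[0]);
        return 0;
}

The per-PF I40E_PFQF_HKEY registers a little further down expose the same four byte lanes at shifts 0, 8, 16 and 24, so the same packing applies there.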
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 +#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) +#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 +#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) +#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HSYM_MAX_INDEX 63 +#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 +#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) +#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_GLQF_PCNT_MAX_INDEX 511 +#define I40E_GLQF_PCNT_PCNT_SHIFT 0 +#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) +#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_SWAP_MAX_INDEX 1 +#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 +#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 +#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 +#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 +#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 +#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 +#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) +#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ +#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 +#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 +#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 +#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) +#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 +#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT) +#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 +#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) +#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 +#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) +#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 +#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 +#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ +#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) +#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ +#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 +#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) +#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 +#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) +#define I40E_PFQF_FDSTAT 0x00246380 
/* Reset: CORER */ +#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) +#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 +#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) +#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_PFQF_HENA_MAX_INDEX 1 +#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_PFQF_HKEY_MAX_INDEX 12 +#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) +#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) +#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) +#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) +#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_PFQF_HLUT_MAX_INDEX 127 +#define I40E_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) +#define I40E_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) +#define I40E_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) +#define I40E_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ +#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 +#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) +#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 +#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 +#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) +#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) +#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ +#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 +#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HENA1_MAX_INDEX 1 +#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* 
_i=0...12, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY1_MAX_INDEX 12 +#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) +#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) +#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) +#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) +#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) +#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) +#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) +#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) +#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION1_MAX_INDEX 7 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, 
I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) +#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPQF_CTL_MAX_INDEX 127 +#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 +#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) +#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 +#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) +#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 +#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) +#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 +#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) +#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_CTL_MAX_INDEX 383 +#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 +#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) +#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 +#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 +#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 +#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 +#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 +#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) +#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 +#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 +#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 +#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) +#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 +#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 +#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) +#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOECRC_MAX_INDEX 143 +#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 +#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) +#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDDPC_MAX_INDEX 143 +#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 +#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) +#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) +#define 
I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) +#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) +#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) +#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) +#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) +#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) +#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOELAST_MAX_INDEX 143 +#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 +#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) +#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPRC_MAX_INDEX 143 +#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 +#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) +#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPTC_MAX_INDEX 143 +#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 +#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) +#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOERPDC_MAX_INDEX 143 +#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 +#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) +#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1_L_MAX_INDEX 143 +#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 +#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) +#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR2_L_MAX_INDEX 143 +#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 +#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) +#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCH_MAX_INDEX 3 +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) +#define I40E_GLPRT_BPRCL(_i) 
(0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCL_MAX_INDEX 3 +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) +#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCH_MAX_INDEX 3 +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) +#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCL_MAX_INDEX 3 +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) +#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 +#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 +#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) +#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCH_MAX_INDEX 3 +#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 +#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) +#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCL_MAX_INDEX 3 +#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 +#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) +#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCH_MAX_INDEX 3 +#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) +#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCL_MAX_INDEX 3 +#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) +#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 +#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 +#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) +#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LDPC_MAX_INDEX 3 +#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 +#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) +#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) +#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) +#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) +#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 +#define 
I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 +#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) +#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MLFC_MAX_INDEX 3 +#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 +#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) +#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCH_MAX_INDEX 3 +#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) +#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCL_MAX_INDEX 3 +#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) +#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCH_MAX_INDEX 3 +#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) +#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCL_MAX_INDEX 3 +#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) +#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MRFC_MAX_INDEX 3 +#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 +#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) +#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 +#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) +#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 +#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) +#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127H_MAX_INDEX 3 +#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 +#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) +#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127L_MAX_INDEX 3 +#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 +#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT) +#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255H_MAX_INDEX 3 +#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 +#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, 
I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) +#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255L_MAX_INDEX 3 +#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 +#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) +#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511H_MAX_INDEX 3 +#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 +#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT) +#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511L_MAX_INDEX 3 +#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 +#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) +#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64H_MAX_INDEX 3 +#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 +#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) +#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64L_MAX_INDEX 3 +#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 +#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) +#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 +#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) +#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 +#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) +#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127H_MAX_INDEX 3 +#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 +#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) +#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127L_MAX_INDEX 3 +#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 +#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) +#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 +#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) +#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 +#define I40E_GLPRT_PTC1522L_PTC1522L_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) +#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255H_MAX_INDEX 3 +#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 +#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) +#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255L_MAX_INDEX 3 +#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 +#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) +#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511H_MAX_INDEX 3 +#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 +#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) +#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511L_MAX_INDEX 3 +#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 +#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) +#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64H_MAX_INDEX 3 +#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 +#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) +#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64L_MAX_INDEX 3 +#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 +#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) +#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 +#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) +#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 +#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) +#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) +#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) +#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) +#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) +#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* 
Reset: CORER */ +#define I40E_GLPRT_RDPC_MAX_INDEX 3 +#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 +#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) +#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RFC_MAX_INDEX 3 +#define I40E_GLPRT_RFC_RFC_SHIFT 0 +#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) +#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RJC_MAX_INDEX 3 +#define I40E_GLPRT_RJC_RJC_SHIFT 0 +#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) +#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RLEC_MAX_INDEX 3 +#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 +#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) +#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ROC_MAX_INDEX 3 +#define I40E_GLPRT_ROC_ROC_SHIFT 0 +#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) +#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUC_MAX_INDEX 3 +#define I40E_GLPRT_RUC_RUC_SHIFT 0 +#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) +#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUPP_MAX_INDEX 3 +#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 +#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) +#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) +#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_TDOLD_MAX_INDEX 3 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) +#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCH_MAX_INDEX 3 +#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCL_MAX_INDEX 3 +#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) +#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCH_MAX_INDEX 3 +#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) +#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCL_MAX_INDEX 3 +#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) +#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCH_MAX_INDEX 15 +#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) +#define I40E_GLSW_BPRCL(_i) 
(0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCL_MAX_INDEX 15 +#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) +#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCH_MAX_INDEX 15 +#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) +#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCL_MAX_INDEX 15 +#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) +#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCH_MAX_INDEX 15 +#define I40E_GLSW_GORCH_GORCH_SHIFT 0 +#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) +#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCL_MAX_INDEX 15 +#define I40E_GLSW_GORCL_GORCL_SHIFT 0 +#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) +#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCH_MAX_INDEX 15 +#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) +#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCL_MAX_INDEX 15 +#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) +#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCH_MAX_INDEX 15 +#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) +#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCL_MAX_INDEX 15 +#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) +#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCH_MAX_INDEX 15 +#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) +#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCL_MAX_INDEX 15 +#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) +#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_RUPP_MAX_INDEX 15 +#define I40E_GLSW_RUPP_RUPP_SHIFT 0 +#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) +#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_TDPC_MAX_INDEX 15 +#define I40E_GLSW_TDPC_TDPC_SHIFT 0 +#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) +#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCH_MAX_INDEX 15 +#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) +#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 
*/ /* Reset: CORER */ +#define I40E_GLSW_UPRCL_MAX_INDEX 15 +#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) +#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCH_MAX_INDEX 15 +#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) +#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCL_MAX_INDEX 15 +#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) +#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCH_MAX_INDEX 383 +#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) +#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCL_MAX_INDEX 383 +#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) +#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCH_MAX_INDEX 383 +#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) +#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCL_MAX_INDEX 383 +#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) +#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCH_MAX_INDEX 383 +#define I40E_GLV_GORCH_GORCH_SHIFT 0 +#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) +#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCL_MAX_INDEX 383 +#define I40E_GLV_GORCL_GORCL_SHIFT 0 +#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) +#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCH_MAX_INDEX 383 +#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) +#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCL_MAX_INDEX 383 +#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) +#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCH_MAX_INDEX 383 +#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) +#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCL_MAX_INDEX 383 +#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) +#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCH_MAX_INDEX 383 +#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) +#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCL_MAX_INDEX 383 
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) +#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RDPC_MAX_INDEX 383 +#define I40E_GLV_RDPC_RDPC_SHIFT 0 +#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) +#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RUPP_MAX_INDEX 383 +#define I40E_GLV_RUPP_RUPP_SHIFT 0 +#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) +#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_TEPC_MAX_INDEX 383 +#define I40E_GLV_TEPC_TEPC_SHIFT 0 +#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) +#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCH_MAX_INDEX 383 +#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) +#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCL_MAX_INDEX 383 +#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) +#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCH_MAX_INDEX 383 +#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 +#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) +#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCL_MAX_INDEX 383 +#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) +#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) +#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLVEBTC_TBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) +#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 +#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) +#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 +#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) +#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 +#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) +#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 +#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) +#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 +#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) +#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 +#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) +#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) +#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) 
/* _i=0...35 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) +#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 +#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) +#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 +#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 +#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 +#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) +#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 +#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 +#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) +#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, 
I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 +#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 +#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) +#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) +#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) +#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) +#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) +#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 +#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 +#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 +#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) +#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ +#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 +#define I40E_PRTTSYN_STAT_1_RXT2_MASK 
I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 +#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) +#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) +#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) +#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) +#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) +#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ +#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) +#define I40E_GL_MDET_RX_EVENT_SHIFT 8 +#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) +#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) +#define I40E_GL_MDET_RX_VALID_SHIFT 31 +#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) +#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ +#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 +#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) +#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 +#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) +#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 +#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) +#define I40E_GL_MDET_TX_EVENT_SHIFT 25 +#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT) +#define I40E_GL_MDET_TX_VALID_SHIFT 31 +#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) +#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ +#define I40E_PF_MDET_RX_VALID_SHIFT 0 +#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) +#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ +#define I40E_PF_MDET_TX_VALID_SHIFT 0 +#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ +#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 +#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 +#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 +#define 
I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_RX_MAX_INDEX 127 +#define I40E_VP_MDET_RX_VALID_SHIFT 0 +#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) +#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_TX_MAX_INDEX 127 +#define I40E_VP_MDET_TX_VALID_SHIFT 0 +#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) +#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ +#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 +#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) +#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 +#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) +#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 +#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) +#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 +#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) +#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 +#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) +#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ +#define I40E_PFPM_APM_APME_SHIFT 0 +#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) +#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) +#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ +#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 +#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) +#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ +#define I40E_PFPM_WUFC_LNKC_SHIFT 0 +#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) +#define I40E_PFPM_WUFC_MAG_SHIFT 1 +#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) +#define I40E_PFPM_WUFC_MNG_SHIFT 3 +#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) +#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 +#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 +#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 +#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 +#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 +#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 +#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 +#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 +#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX0_SHIFT 16 +#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) +#define I40E_PFPM_WUFC_FLX1_SHIFT 17 +#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) +#define I40E_PFPM_WUFC_FLX2_SHIFT 18 +#define 
I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) +#define I40E_PFPM_WUFC_FLX3_SHIFT 19 +#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) +#define I40E_PFPM_WUFC_FLX4_SHIFT 20 +#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) +#define I40E_PFPM_WUFC_FLX5_SHIFT 21 +#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) +#define I40E_PFPM_WUFC_FLX6_SHIFT 22 +#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) +#define I40E_PFPM_WUFC_FLX7_SHIFT 23 +#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) +#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) +#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ +#define I40E_PFPM_WUS_LNKC_SHIFT 0 +#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) +#define I40E_PFPM_WUS_MAG_SHIFT 1 +#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) +#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 +#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) +#define I40E_PFPM_WUS_MNG_SHIFT 3 +#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) +#define I40E_PFPM_WUS_FLX0_SHIFT 16 +#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) +#define I40E_PFPM_WUS_FLX1_SHIFT 17 +#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) +#define I40E_PFPM_WUS_FLX2_SHIFT 18 +#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) +#define I40E_PFPM_WUS_FLX3_SHIFT 19 +#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) +#define I40E_PFPM_WUS_FLX4_SHIFT 20 +#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) +#define I40E_PFPM_WUS_FLX5_SHIFT 21 +#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) +#define I40E_PFPM_WUS_FLX6_SHIFT 22 +#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) +#define I40E_PFPM_WUS_FLX7_SHIFT 23 +#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) +#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) +#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ +#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 +#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) +#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 +#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) +#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAH_MAX_INDEX 3 +#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 +#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) +#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 +#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) +#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 +#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) +#define I40E_PRTPM_SAH_AV_SHIFT 31 +#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) +#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAL_MAX_INDEX 3 +#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 +#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 
0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define 
I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* 
_i=0...2 */ /* Reset: VFR */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, 
I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define 
I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) +#endif diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_status.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_status.h new file mode 100644 index 000000000..5f9cac55a --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -0,0 +1,100 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, +}; + +#endif /* _I40E_STATUS_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c new file mode 100644 index 000000000..9d95042d5 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -0,0 +1,2778 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include +#include +#include "i40e.h" +#include "i40e_prototype.h" + +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) +#define I40E_FD_CLEAN_DELAY 10 +/** + * i40e_program_fdir_filter - Program a Flow Director filter + * @fdir_data: Packet data that will be filter parameters + * @raw_packet: the pre-allocated packet buffer for FDir + * @pf: The PF pointer + * @add: True for add/update, False for remove + **/ +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, + struct i40e_pf *pf, bool add) +{ + struct i40e_filter_program_desc *fdir_desc; + struct i40e_tx_buffer *tx_buf, *first; + struct i40e_tx_desc *tx_desc; + struct i40e_ring *tx_ring; + unsigned int fpt, dcc; + struct i40e_vsi *vsi; + struct device *dev; + dma_addr_t dma; + u32 td_cmd = 0; + u16 delay = 0; + u16 i; + + /* find existing FDIR VSI */ + vsi = NULL; + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) + vsi = pf->vsi[i]; + if (!vsi) + return -ENOENT; + + tx_ring = vsi->tx_rings[0]; + dev = tx_ring->dev; + + /* we need two descriptors to add/del a filter and we can wait */ + do { + if (I40E_DESC_UNUSED(tx_ring) > 1) + break; + msleep_interruptible(1); + delay++; + } while (delay < I40E_FD_CLEAN_DELAY); + + if (!(I40E_DESC_UNUSED(tx_ring) > 1)) + return -EAGAIN; + + dma = dma_map_single(dev, raw_packet, + I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + goto dma_fail; + + /* grab the next descriptor */ + i = tx_ring->next_to_use; + fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + first = &tx_ring->tx_bi[i]; + memset(first, 0, sizeof(struct i40e_tx_buffer)); + + tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? 
i + 1 : 0; + + fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK; + + fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK; + + fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK; + + /* Use LAN VSI Id if not programmed by user */ + if (fdir_data->dest_vsi == 0) + fpt |= (pf->vsi[pf->lan_vsi]->id) << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; + else + fpt |= ((u32)fdir_data->dest_vsi << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK; + + dcc = I40E_TX_DESC_DTYPE_FILTER_PROG; + + if (add) + dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT; + else + dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT; + + dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK; + + dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK; + + if (fdir_data->cnt_index != 0) { + dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; + dcc |= ((u32)fdir_data->cnt_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + } + + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt); + fdir_desc->rsvd = cpu_to_le32(0); + fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc); + fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); + + /* Now program a dummy descriptor */ + i = tx_ring->next_to_use; + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_buf = &tx_ring->tx_bi[i]; + + tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; + + memset(tx_buf, 0, sizeof(struct i40e_tx_buffer)); + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); + dma_unmap_addr_set(tx_buf, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; + + tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; + tx_buf->raw_buf = (void *)raw_packet; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); + + /* set the timestamp */ + tx_buf->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. 
+ */ + wmb(); + + /* Mark the data descriptor to be watched */ + first->next_to_watch = tx_desc; + + writel(tx_ring->next_to_use, tx_ring->tail); + return 0; + +dma_fail: + return -1; +} + +#define IP_HEADER_OFFSET 14 +#define I40E_UDPIP_DUMMY_PACKET_LEN 42 +/** + * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct udphdr *udp; + struct iphdr *ip; + bool err = false; + u8 *raw_packet; + int ret; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + udp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + udp->source = fd_data->src_port; + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + err = true; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + return err ? -EOPNOTSUPP : 0; +} + +#define I40E_TCPIP_DUMMY_PACKET_LEN 54 +/** + * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct tcphdr *tcp; + struct iphdr *ip; + bool err = false; + u8 *raw_packet; + int ret; + /* Dummy packet */ + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11, + 0x0, 0x72, 0, 0, 0, 0}; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + tcp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + tcp->source = fd_data->src_port; + + if (add) { + pf->fd_tcp_rule++; + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + } + } else { + pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ? 
+ (pf->fd_tcp_rule - 1) : 0; + if (pf->fd_tcp_rule == 0) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + } + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + err = true; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + + return err ? -EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Always returns -EOPNOTSUPP + **/ +static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + return -EOPNOTSUPP; +} + +#define I40E_IP_DUMMY_PACKET_LEN 34 +/** + * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct iphdr *ip; + bool err = false; + u8 *raw_packet; + int ret; + int i; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; + + for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + + ip->saddr = fd_data->src_ip[0]; + ip->daddr = fd_data->dst_ip[0]; + ip->protocol = 0; + + fd_data->pctype = i; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + err = true; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + } + + return err ? 
-EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir - Build raw packets to add/del fdir filter + * @vsi: pointer to the targeted VSI + * @cmd: command to get or set RX flow classification rules + * @add: true adds a filter, false removes it + * + **/ +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add) +{ + struct i40e_pf *pf = vsi->back; + int ret; + + switch (input->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ret = i40e_add_del_fdir_tcpv4(vsi, input, add); + break; + case UDP_V4_FLOW: + ret = i40e_add_del_fdir_udpv4(vsi, input, add); + break; + case SCTP_V4_FLOW: + ret = i40e_add_del_fdir_sctpv4(vsi, input, add); + break; + case IPV4_FLOW: + ret = i40e_add_del_fdir_ipv4(vsi, input, add); + break; + case IP_USER_FLOW: + switch (input->ip4_proto) { + case IPPROTO_TCP: + ret = i40e_add_del_fdir_tcpv4(vsi, input, add); + break; + case IPPROTO_UDP: + ret = i40e_add_del_fdir_udpv4(vsi, input, add); + break; + case IPPROTO_SCTP: + ret = i40e_add_del_fdir_sctpv4(vsi, input, add); + break; + default: + ret = i40e_add_del_fdir_ipv4(vsi, input, add); + break; + } + break; + default: + dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", + input->flow_type); + ret = -EINVAL; + } + + /* The buffer allocated here is freed by the i40e_clean_tx_ring() */ + return ret; +} + +/** + * i40e_fd_handle_status - check the Programming Status for FD + * @rx_ring: the Rx ring for this descriptor + * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor. + * @prog_id: the id originally used for programming + * + * This is used to verify if the FD programming or invalidation + * requested by SW to the HW is successful or not and take actions accordingly. + **/ +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) +{ + struct i40e_pf *pf = rx_ring->vsi->back; + struct pci_dev *pdev = pf->pdev; + u32 fcnt_prog, fcnt_avail; + u32 error; + u64 qw; + + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + + if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || + (I40E_DEBUG_FD & pf->hw.debug_mask)) + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", + rx_desc->wb.qword0.hi_dword.fd_id); + + /* Check if the programming error is for ATR. + * If so, auto disable ATR and set a state for + * flush in progress. Next time we come here if flush is in + * progress do nothing, once flush is complete the state will + * be cleared. + */ + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + + pf->fd_add_err++; + /* store the current atr filter count */ + pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); + + if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + } + + /* filter programming failed most likely due to table full */ + fcnt_prog = i40e_get_global_fd_count(pf); + fcnt_avail = pf->fdir_pf_filter_count; + /* If ATR is running fcnt_prog can quickly change, + * if we are very close to full, it makes sense to disable + * FD ATR/SB and then re-enable it when there is room. 
+ */ + if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + !(pf->auto_disable_flags & + I40E_FLAG_FD_SB_ENABLED)) { + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + pf->auto_disable_flags |= + I40E_FLAG_FD_SB_ENABLED; + } + } else { + dev_info(&pdev->dev, + "FD filter programming failed due to incorrect filter parameters\n"); + } + } else if (error == + (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", + rx_desc->wb.qword0.hi_dword.fd_id); + } +} + +/** + * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, + struct i40e_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + else + dev_kfree_skb_any(tx_buffer->skb); + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * i40e_clean_tx_ring - Free any empty Tx buffers + * @tx_ring: ring to be cleaned + **/ +void i40e_clean_tx_ring(struct i40e_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)); +} + +/** + * i40e_free_tx_resources - Free Tx resources per queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void i40e_free_tx_resources(struct i40e_ring *tx_ring) +{ + i40e_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** + * i40e_get_tx_pending - how many tx descriptors not processed + * @tx_ring: the ring of descriptors + * + * Since there is no access to the ring head register + * in XL710, we need to use our local copies + **/ +static u32 i40e_get_tx_pending(struct i40e_ring *ring) +{ + u32 head, tail; + + head = i40e_get_head(ring); + tail = 
readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +/** + * i40e_check_tx_hang - Is there a hang in the Tx queue + * @tx_ring: the ring of descriptors + **/ +static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) +{ + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = i40e_get_tx_pending(tx_ring); + struct i40e_pf *pf = tx_ring->vsi->back; + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * PFC clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, + &tx_ring->state); + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { + if (I40E_DEBUG_FLOW & pf->hw.debug_mask) + dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", + tx_pending, tx_ring->queue_index); + pf->tx_sluggish_count++; + } else { + /* update completed stats and disarm the hang check */ + tx_ring->tx_stats.tx_done_old = tx_done; + clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); + } + + return ret; +} + +#define WB_STRIDE 0x3 + +/** + * i40e_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. 
the clean is finished) + **/ +static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; + struct i40e_tx_desc *tx_desc; + unsigned int total_packets = 0; + unsigned int total_bytes = 0; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buf->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; + + /* check to see if there are any non-cache aligned descriptors + * waiting to be written back, and kick the hardware to force + * them to be written back in case of napi polling + */ + if (budget && + !((i & WB_STRIDE) == WB_STRIDE) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + else + tx_ring->arm_wb = false; + + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" + " VSI <%d>\n" + " Tx Queue <%d>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n", + tx_ring->vsi->seid, + tx_ring->queue_index, + tx_ring->next_to_use, i); + dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->tx_bi[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + dev_info(tx_ring->dev, + "tx hang detected on queue %d, reset requested\n", + tx_ring->queue_index); + + /* do not fire the reset immediately, wait for the stack to + * decide we are truly stuck, also prevents every queue from + * 
simultaneously requesting a reset + */ + + /* the adapter is about to reset, no point in enabling polling */ + budget = 1; + } + + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), + val); +} + +/** + * i40e_set_new_dynamic_itr - Find new ITR level + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte counts during + * the last interrupt. The advantage of per interrupt computation + * is faster updates and more accurate ITR for the current traffic + * pattern. Constants in this function were computed based on + * theoretical maximum wire speed and thresholds were set based on + * testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ **/ +static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +{ + enum i40e_latency_range new_latency_range = rc->latency_range; + u32 new_itr = rc->itr; + int bytes_per_int; + + if (rc->total_packets == 0 || !rc->itr) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (8000 ints/s) + */ + bytes_per_int = rc->total_bytes / rc->itr; + switch (rc->itr) { + case I40E_LOWEST_LATENCY: + if (bytes_per_int > 10) + new_latency_range = I40E_LOW_LATENCY; + break; + case I40E_LOW_LATENCY: + if (bytes_per_int > 20) + new_latency_range = I40E_BULK_LATENCY; + else if (bytes_per_int <= 10) + new_latency_range = I40E_LOWEST_LATENCY; + break; + case I40E_BULK_LATENCY: + if (bytes_per_int <= 20) + rc->latency_range = I40E_LOW_LATENCY; + break; + } + + switch (new_latency_range) { + case I40E_LOWEST_LATENCY: + new_itr = I40E_ITR_100K; + break; + case I40E_LOW_LATENCY: + new_itr = I40E_ITR_20K; + break; + case I40E_BULK_LATENCY: + new_itr = I40E_ITR_8K; + break; + default: + break; + } + + if (new_itr != rc->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * rc->itr) / + ((9 * new_itr) + rc->itr); + rc->itr = new_itr & I40E_MAX_ITR; + } + + rc->total_bytes = 0; + rc->total_packets = 0; +} + +/** + * i40e_update_dynamic_itr - Adjust ITR based on bytes per int + * @q_vector: the vector to adjust + **/ +static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) +{ + u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; + struct i40e_hw *hw = &q_vector->vsi->back->hw; + u32 reg_addr; + u16 old_itr; + + reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1); + old_itr = q_vector->rx.itr; + i40e_set_new_dynamic_itr(&q_vector->rx); + if (old_itr != q_vector->rx.itr) + wr32(hw, reg_addr, q_vector->rx.itr); + + reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1); + old_itr = q_vector->tx.itr; + i40e_set_new_dynamic_itr(&q_vector->tx); + if (old_itr != q_vector->tx.itr) + wr32(hw, reg_addr, q_vector->tx.itr); +} + +/** + * i40e_clean_programming_status - clean the programming status descriptor + * @rx_ring: the rx ring that has this descriptor + * @rx_desc: the rx descriptor written back by HW + * + * Flow director should handle FD_FILTER_STATUS to check its filter programming + * status being successful or not and take actions accordingly. FCoE should + * handle its context/filter programming/invalidation status and take actions. 
+ * + **/ +static void i40e_clean_programming_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + u64 qw; + u8 id; + + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; + + if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) + i40e_fd_handle_status(rx_ring, rx_desc, id); +#ifdef I40E_FCOE + else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) || + (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS)) + i40e_fcoe_handle_status(rx_ring, rx_desc, id); +#endif +} + +/** + * i40e_setup_tx_descriptors - Allocate the Tx descriptors + * @tx_ring: the tx ring to set up + * + * Return 0 on success, negative on error + **/ +int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_bi) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_clean_rx_ring - Free Rx buffers + * @rx_ring: ring to be cleaned + **/ +void i40e_clean_rx_ring(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_bi) + return; + + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->dma) { + dma_unmap_single(dev, + rx_bi->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + } + if (rx_bi->skb) { + dev_kfree_skb(rx_bi->skb); + rx_bi->skb = NULL; + } + if (rx_bi->page) { + if (rx_bi->page_dma) { + dma_unmap_page(dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + __free_page(rx_bi->page); + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + } + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * i40e_free_rx_resources - Free Rx resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void i40e_free_rx_resources(struct i40e_ring *rx_ring) +{ + i40e_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, 
rx_ring->size, + rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +/** + * i40e_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. */ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** + * i40e_setup_rx_descriptors - Allocate Rx descriptors + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int bi_size; + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_bi) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) + ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) + : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rx_ring->size); + goto err; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
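Both allocation paths above round their DMA sizes up to fixed boundaries: 256 bytes per Rx header buffer and 4 KiB for the descriptor ring (plus a u32 for head writeback on the Tx side). A minimal userspace check of that rounding, using a local ALIGN_UP stand-in for the kernel's ALIGN() macro:

#include <stdio.h>
#include <stdint.h>

/* Round x up to a power-of-two boundary a (stand-in for the kernel ALIGN()). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t count = 512;                    /* descriptors in the ring */
	uint64_t desc_size = 16;                 /* bytes per Tx descriptor */
	uint64_t ring = count * desc_size + 4;   /* + u32 for head writeback */

	printf("raw ring size      : %llu bytes\n", (unsigned long long)ring);
	printf("4 KiB aligned ring : %llu bytes\n",
	       (unsigned long long)ALIGN_UP(ring, 4096));
	printf("256-aligned header : %llu bytes\n",
	       (unsigned long long)ALIGN_UP(96, 256));
	return 0;
}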
+ */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + struct sk_buff *skb; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + skb = bi->skb; + + if (!skb) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + goto no_buffers; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; + } + + if (!bi->dma) { + bi->dma = dma_map_single(rx_ring->dev, + skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, bi->dma)) { + rx_ring->rx_stats.alloc_buff_failed++; + bi->dma = 0; + goto no_buffers; + } + } + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; + struct i40e_vsi *vsi = rx_ring->vsi; + u64 flags = vsi->back->flags; + + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + if (flags & I40E_FLAG_IN_NETPOLL) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * i40e_rx_checksum - 
Indicate in skb if hw indicated a good cksum + * @vsi: the VSI we care about + * @skb: skb currently being received and modified + * @rx_status: status value of last descriptor in packet + * @rx_error: error value of last descriptor in packet + * @rx_ptype: ptype value of last descriptor in packet + **/ +static inline void i40e_rx_checksum(struct i40e_vsi *vsi, + struct sk_buff *skb, + u32 rx_status, + u32 rx_error, + u16 rx_ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); + bool ipv4 = false, ipv6 = false; + bool ipv4_tunnel, ipv6_tunnel; + __wsum rx_udp_csum; + struct iphdr *iph; + __sum16 csum; + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum enabled and ip headers found? */ + if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + return; + + /* did the hardware decode the packet and checksum? */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + return; + + /* both known and outer_ip must be set for the below code to work */ + if (!(decoded.known && decoded.outer_ip)) + return; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) + ipv4 = true; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) + ipv6 = true; + + if (ipv4 && + (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | + (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + goto checksum_fail; + + /* likely incorrect csum if alternate IP extension headers found */ + if (ipv6 && + rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + /* don't increment checksum err here, non-fatal err */ + return; + + /* there was some L4 error, count error and punt packet to the stack */ + if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + goto checksum_fail; + + /* handle packets that were not able to be checksummed due + * to arrival speed, in this case the stack can compute + * the csum. + */ + if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + return; + + /* If VXLAN traffic has an outer UDPv4 checksum we need to check + * it in the driver, hardware does not do it for us. + * Since L3L4P bit was set we assume a valid IHL value (>=5) + * so the total length of IPv4 header is IHL*4 bytes + * The UDP_0 bit *may* bet set if the *inner* header is UDP + */ + if (ipv4_tunnel) { + skb->transport_header = skb->mac_header + + sizeof(struct ethhdr) + + (ip_hdr(skb)->ihl * 4); + + /* Add 4 bytes for VLAN tagged packets */ + skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + ? 
VLAN_HLEN : 0; + + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic( + iph->saddr, iph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); + + if (udp_hdr(skb)->check != csum) + goto checksum_fail; + + } /* else its GRE and so no outer UDP header */ + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; + + return; + +checksum_fail: + vsi->back->hw_csum_rx_error++; +} + +/** + * i40e_rx_hash - returns the hash value from the Rx descriptor + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline u32 i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc) +{ + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if ((ring->netdev->features & NETIF_F_RXHASH) && + (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) + return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + else + return 0; +} + +/** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; +} + +/** + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. the clean is finished) + **/ +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + const int current_node = numa_node_id(); + struct i40e_vsi *vsi = rx_ring->vsi; + u16 i = rx_ring->next_to_clean; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u8 rx_ptype; + u64 qword; + + if (budget <= 0) + return 0; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
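The receive clean-up loop that starts here decodes status, error, length and packet-type fields from a single 64-bit writeback word with mask-and-shift pairs, and trusts none of them until the DD (descriptor done) bit is seen. The sketch below shows the same pattern on a made-up layout; the field positions are hypothetical, not the i40e descriptor format.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout for illustration only. */
#define STATUS_SHIFT   0
#define STATUS_MASK    (0x3FFULL << STATUS_SHIFT)
#define PKT_LEN_SHIFT  38
#define PKT_LEN_MASK   (0x3FFFULL << PKT_LEN_SHIFT)
#define STATUS_DD      0x1ULL          /* "descriptor done" bit */

int main(void)
{
	uint64_t qword = (1470ULL << PKT_LEN_SHIFT) | STATUS_DD;
	uint64_t status = (qword & STATUS_MASK) >> STATUS_SHIFT;
	uint64_t len = (qword & PKT_LEN_MASK) >> PKT_LEN_SHIFT;

	if (status & STATUS_DD)            /* only trust the rest once DD is set */
		printf("packet length %llu bytes\n", (unsigned long long)len);
	return 0;
}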
+ */ + dma_rmb(); + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> + I40E_RXD_QW1_LENGTH_HBUF_SHIFT; + rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> + I40E_RXD_QW1_LENGTH_SPH_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); + rx_bi->skb = NULL; + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; + if (rx_hbo) + len = I40E_RX_HDR_SIZE; + else + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; + } + + /* Get the rest of the data if this was a header split */ + if (rx_packet_len) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset, + rx_packet_len); + + skb->len += rx_packet_len; + skb->data_len += rx_packet_len; + skb->truesize += rx_packet_len; + + if ((page_count(rx_bi->page) == 1) && + (page_to_nid(rx_bi->page) == current_node)) + get_page(rx_bi->page); + else + rx_bi->page = NULL; + + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + struct i40e_rx_buffer *next_buffer; + + next_buffer = &rx_ring->rx_bi[i]; + next_buffer->skb = skb; + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); + + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
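Both receive paths step next_to_clean forward with a wrap back to zero at the end of the ring (the I40E_RX_INCREMENT macro defined later in i40e_txrx.h in this patch). The same wrap, reduced to a stand-alone helper:

#include <stdio.h>

/* Advance a ring index by one, wrapping at ring_count, as the clean-up
 * loops do for next_to_clean. */
static unsigned short ring_next(unsigned short i, unsigned short ring_count)
{
	i++;
	return (i == ring_count) ? 0 : i;
}

int main(void)
{
	unsigned short i = 510;
	const unsigned short count = 512;

	for (int n = 0; n < 4; n++) {
		printf("index %u\n", i);
		i = ring_next(i, count);
	}
	return 0;
}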
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + * + * Returns the amount of work done + **/ +int i40e_napi_poll(struct napi_struct *napi, int budget) +{ + struct i40e_q_vector *q_vector = + container_of(napi, struct i40e_q_vector, napi); + struct i40e_vsi *vsi = q_vector->vsi; + struct i40e_ring *ring; + bool clean_complete = true; + bool arm_wb = false; + int budget_per_ring; + int cleaned; + + if (test_bit(__I40E_DOWN, &vsi->state)) { + napi_complete(napi); + return 0; + } + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + i40e_for_each_ring(ring, q_vector->tx) { + clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + arm_wb |= ring->arm_wb; + } + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. 
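As the comment above says, the poll routine splits its budget across the vector's Rx rings but never hands a ring less than 1, since a zero budget would make that ring look finished immediately. A compact illustration of the split, with an assumed budget of 64:

#include <stdio.h>

/* Split a NAPI budget across n rings, never handing out less than 1. */
static int budget_per_ring(int budget, int num_ringpairs)
{
	int share = budget / num_ringpairs;

	return share > 0 ? share : 1;
}

int main(void)
{
	printf("4 rings,   budget 64 -> %d per ring\n", budget_per_ring(64, 4));
	printf("128 rings, budget 64 -> %d per ring\n", budget_per_ring(64, 128));
	return 0;
}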
+ */ + budget_per_ring = max(budget/q_vector->num_ringpairs, 1); + + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + if (arm_wb) + i40e_force_wb(vsi, q_vector); + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete(napi); + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || + ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + i40e_update_dynamic_itr(q_vector); + + if (!test_bit(__I40E_DOWN, &vsi->state)) { + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_irq_dynamic_enable(vsi, + q_vector->v_idx + vsi->base_vector); + } else { + struct i40e_hw *hw = &vsi->back->hw; + /* We re-enable the queue 0 cause, but + * don't worry about dynamic_enable + * because we left it on for the other + * possible interrupts during napi + */ + u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); + qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_RQCTL(0), qval); + + qval = rd32(hw, I40E_QINT_TQCTL(0)); + qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_TQCTL(0), qval); + + i40e_irq_dynamic_enable_icr0(vsi->back); + } + } + + return 0; +} + +/** + * i40e_atr - Add a Flow Director ATR filter + * @tx_ring: ring to add programming descriptor to + * @skb: send buffer + * @flags: send flags + * @protocol: wire protocol + **/ +static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 flags, __be16 protocol) +{ + struct i40e_filter_program_desc *fdir_desc; + struct i40e_pf *pf = tx_ring->vsi->back; + union { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + struct tcphdr *th; + unsigned int hlen; + u32 flex_ptype, dtype_cmd; + u16 i; + + /* make sure ATR is enabled */ + if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + /* if sampling is disabled do nothing */ + if (!tx_ring->atr_sample_rate) + return; + + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); + + /* Currently only IPv4/IPv6 with TCP is supported */ + if (protocol == htons(ETH_P_IP)) { + if (hdr.ipv4->protocol != IPPROTO_TCP) + return; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + } else if (protocol == htons(ETH_P_IPV6)) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + + hlen = sizeof(struct ipv6hdr); + } else { + return; + } + + th = (struct tcphdr *)(hdr.network + hlen); + + /* Due to lack of space, no more new filters can be programmed */ + if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + tx_ring->atr_count++; + + /* sample on all syn/fin/rst packets or once every atr sample rate */ + if (!th->fin && + !th->syn && + !th->rst && + (tx_ring->atr_count < tx_ring->atr_sample_rate)) + return; + + tx_ring->atr_count = 0; + + /* grab the next descriptor */ + i = tx_ring->next_to_use; + fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK; + flex_ptype |= (protocol == htons(ETH_P_IP)) ? 
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : + (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); + + flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; + + dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; + + dtype_cmd |= (th->fin || th->rst) ? + (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT) : + (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << + I40E_TXD_FLTR_QW1_DEST_SHIFT; + + dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; + + dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; + dtype_cmd |= + ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); + fdir_desc->rsvd = cpu_to_le32(0); + fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); + fdir_desc->fd_id = cpu_to_le32(0); +} + +/** + * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * @skb: send buffer + * @tx_ring: ring to send buffer on + * @flags: the tx flags to be set + * + * Checks the skb and set up correspondingly several generic transmit flags + * related to VLAN tagging for the HW, such as VLAN, DCB, etc. + * + * Returns error code indicate the frame should be dropped upon error and the + * otherwise returns 0 to indicate the flags has been set properly. + **/ +#ifdef I40E_FCOE +int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) +#else +static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) +#endif +{ + __be16 protocol = skb->protocol; + u32 tx_flags = 0; + + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. 
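In the VLAN handling that follows, tx_flags carries the VLAN TCI in its upper 16 bits, which is why the 802.1p priority (the top three bits of the TCI) can later be rewritten in place for DCB. A self-contained sketch of that bit layout, reusing the shift values that i40e_txrx.h defines later in this patch; the tag and priority values are made up:

#include <stdio.h>
#include <stdint.h>

#define TX_FLAGS_VLAN_SHIFT       16
#define TX_FLAGS_VLAN_PRIO_SHIFT  29
#define TX_FLAGS_VLAN_PRIO_MASK   0xe0000000u

int main(void)
{
	uint16_t vlan_tci = 100;         /* VLAN ID 100, priority 0 (made up) */
	uint32_t tx_flags = (uint32_t)vlan_tci << TX_FLAGS_VLAN_SHIFT;

	/* rewrite the 802.1p priority field in place, as the DCB path does */
	tx_flags &= ~TX_FLAGS_VLAN_PRIO_MASK;
	tx_flags |= (5u & 0x7) << TX_FLAGS_VLAN_PRIO_SHIFT;

	printf("tx_flags = 0x%08x, tag to insert = 0x%04x\n",
	       (unsigned int)tx_flags,
	       (unsigned int)(tx_flags >> TX_FLAGS_VLAN_SHIFT));
	return 0;
}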
+ */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + + /* if we have a HW VLAN tag being added, default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN, check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + return -EINVAL; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_SW_VLAN; + } + + if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + goto out; + + /* Insert 802.1p priority into VLAN header */ + if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL)) { + tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= (skb->priority & 0x7) << + I40E_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + I40E_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + } + } + +out: + *flags = tx_flags; + return 0; +} + +/** + * i40e_tso - set up the tso context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * @protocol: the send protocol + * @hdr_len: ptr to the size of the packet header + * @cd_tunneling: ptr to context descriptor bits + * + * Returns 0 if no TSO can happen, 1 if tso is going, or error + **/ +static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) +{ + u32 cd_cmd, cd_tso_len, cd_mss; + struct ipv6hdr *ipv6h; + struct tcphdr *tcph; + struct iphdr *iph; + u32 l4len; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + } else if (ipv6h->version == 6) { + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + } + + l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = (skb->encapsulation + ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb)) + l4len; + + /* find the field values */ + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = skb->len - *hdr_len; + cd_mss = skb_shinfo(skb)->gso_size; + *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + return 1; +} + +/** + * i40e_tsyn - set up the tsyn context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * + * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen + **/ +static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u64 *cd_type_cmd_tso_mss) +{ + struct i40e_pf *pf; + + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return 0; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (tx_flags & I40E_TX_FLAGS_TSO) + return 0; + + /* only timestamp the outbound packet if the user has requested it and + * we are not already transmitting a packet to be timestamped + */ + pf = i40e_netdev_to_pf(tx_ring->netdev); + if (!(pf->flags & I40E_FLAG_PTP)) + return 0; + + if (pf->ptp_tx && + !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + pf->ptp_tx_skb = skb_get(skb); + } else { + return 0; + } + + *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << + I40E_TXD_CTX_QW1_CMD_SHIFT; + + return 1; +} + +/** + * i40e_tx_enable_csum - Enable Tx checksum offloads + * @skb: send buffer + * @tx_flags: Tx flags currently set + * @td_cmd: Tx descriptor command bits to set + * @td_offset: Tx descriptor header offsets to set + * @cd_tunneling: ptr to context desc bits + **/ +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) +{ + struct ipv6hdr *this_ipv6_hdr; + unsigned int this_tcp_hdrlen; + struct iphdr *this_ip_hdr; + u32 network_hdr_len; + u8 l4_hdr = 0; + u32 l4_tunnel = 0; + + if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } + network_hdr_len = skb_inner_network_header_len(skb); + this_ip_hdr = inner_ip_hdr(skb); + this_ipv6_hdr = inner_ipv6_hdr(skb); + this_tcp_hdrlen = inner_tcp_hdrlen(skb); + + if (tx_flags & I40E_TX_FLAGS_IPV4) { + + if (tx_flags & I40E_TX_FLAGS_TSO) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + ip_hdr(skb)->check = 0; + } else { + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + } + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) + ip_hdr(skb)->check = 0; + } + + /* Now set the ctx descriptor fields */ + *cd_tunneling |= (skb_network_header_len(skb) >> 2) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | + ((skb_inner_network_offset(skb) - + skb_transport_offset(skb)) >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } + } else { + network_hdr_len = skb_network_header_len(skb); + this_ip_hdr = ip_hdr(skb); + this_ipv6_hdr = ipv6_hdr(skb); + this_tcp_hdrlen = tcp_hdrlen(skb); + } + + /* Enable IP checksum offloads */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { + l4_hdr = this_ip_hdr->protocol; + /* the stack computes the IP header already, the only time we + * need the 
hardware to recompute it is in the case of TSO. + */ + if (tx_flags & I40E_TX_FLAGS_TSO) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; + this_ip_hdr->check = 0; + } else { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; + } + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + l4_hdr = this_ipv6_hdr->nexthdr; + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } + /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ + *td_offset |= (skb_network_offset(skb) >> 1) << + I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L4 checksum offloads */ + switch (l4_hdr) { + case IPPROTO_TCP: + /* enable checksum offloads */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (this_tcp_hdrlen >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +/** + * i40e_create_tx_ctx Build the Tx context descriptor + * @tx_ring: ring to create the descriptor on + * @cd_type_cmd_tso_mss: Quad Word 1 + * @cd_tunneling: Quad Word 0 - bits 0-31 + * @cd_l2tag2: Quad Word 0 - bits 32-63 + **/ +static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, + const u64 cd_type_cmd_tso_mss, + const u32 cd_tunneling, const u32 cd_l2tag2) +{ + struct i40e_tx_context_desc *context_desc; + int i = tx_ring->next_to_use; + + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) + return; + + /* grab the next descriptor */ + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* cpu_to_le32 and assign to struct fields */ + context_desc->tunneling_params = cpu_to_le32(cd_tunneling); + context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); + context_desc->rsvd = cpu_to_le16(0); + context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); +} + +/** + * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40e_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +#ifdef I40E_FCOE +int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#else +static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#endif +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} + +/** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 0; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 
1 : 0; + } + if (j == I40E_MAX_BUFFER_TXD) { + linearize = true; + break; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + +/** + * i40e_tx_map - Build the Tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * @td_cmd: the command field in the descriptor + * @td_offset: offset for checksum or crc + **/ +#ifdef I40E_FCOE +void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +#else +static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +#endif +{ + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + struct skb_frag_struct *frag; + struct i40e_tx_buffer *tx_bi; + struct i40e_tx_desc *tx_desc; + u16 i = tx_ring->next_to_use; + u32 td_tag = 0; + dma_addr_t dma; + u16 gso_segs; + + if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> + I40E_TX_FLAGS_VLAN_SHIFT; + } + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) + gso_segs = skb_shinfo(skb)->gso_segs; + else + gso_segs = 1; + + /* multiply data chunks by size of headers */ + first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); + first->gso_segs = gso_segs; + first->skb = skb; + first->tx_flags = tx_flags; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_bi = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, + I40E_MAX_DATA_PER_TXD, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += I40E_MAX_DATA_PER_TXD; + size -= I40E_MAX_DATA_PER_TXD; + + tx_desc->buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_bi[i]; + } + + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. 
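In the mapping loop above, a single buffer longer than I40E_MAX_DATA_PER_TXD (8192 bytes per the header later in this patch) is emitted as several data descriptors, each covering at most that much. The loop below mirrors that chunking on plain integers to show how many descriptors a hypothetical 20000-byte buffer consumes:

#include <stdio.h>

#define MAX_DATA_PER_TXD 8192u   /* per-descriptor limit, as in i40e_txrx.h */

int main(void)
{
	unsigned int size = 20000;   /* hypothetical mapped buffer length */
	unsigned int offset = 0, descs = 0;

	while (size > MAX_DATA_PER_TXD) {
		printf("desc %u: offset %u, len %u\n", descs, offset, MAX_DATA_PER_TXD);
		offset += MAX_DATA_PER_TXD;
		size -= MAX_DATA_PER_TXD;
		descs++;
	}
	printf("desc %u: offset %u, len %u (last)\n", descs, offset, size);
	return 0;
}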
+ */ + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + /* notify HW of packet */ + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) + writel(i, tx_ring->tail); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_bi[i]; + i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +/** + * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
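The descriptor count computed next is a ceiling division per fragment plus one for the linear head, and the caller then asks for count + 4 + 1 free slots: a four-descriptor gap to stay clear of the cache line holding head, plus one context descriptor. A stand-alone version of that arithmetic for a hypothetical skb:

#include <stdio.h>

#define MAX_DATA_PER_TXD 8192u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical skb: 1400-byte linear head plus two page fragments */
	unsigned int head_len = 1400, frag_len[] = { 16384, 4096 };
	unsigned int count = DIV_ROUND_UP(head_len, MAX_DATA_PER_TXD);

	for (unsigned int f = 0; f < 2; f++)
		count += DIV_ROUND_UP(frag_len[f], MAX_DATA_PER_TXD);

	printf("data descriptors: %u, ring slots requested: %u\n",
	       count, count + 4 + 1);
	return 0;
}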
+ **/ +#ifdef I40E_FCOE +int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) +#else +static int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) +#endif +{ + unsigned int f; + int count = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + count += TXD_USE_COUNT(skb_headlen(skb)); + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; + return 0; + } + return count; +} + +/** + * i40e_xmit_frame_ring - Sends buffer on Tx ring + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ + u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u32 cd_tunneling = 0, cd_l2tag2 = 0; + struct i40e_tx_buffer *first; + u32 td_offset = 0; + u32 tx_flags = 0; + __be16 protocol; + u32 td_cmd = 0; + u8 hdr_len = 0; + int tsyn; + int tso; + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + return NETDEV_TX_BUSY; + + /* prepare the xmit flags */ + if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* obtain protocol of skb */ + protocol = vlan_get_protocol(skb); + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + + /* setup IPv4/IPv6 offloads */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= I40E_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + tx_flags |= I40E_TX_FLAGS_IPV6; + + tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + &cd_type_cmd_tso_mss, &cd_tunneling); + + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= I40E_TX_FLAGS_TSO; + + tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); + + if (tsyn) + tx_flags |= I40E_TX_FLAGS_TSYN; + + if (i40e_chk_linearize(skb, tx_flags)) + if (skb_linearize(skb)) + goto out_drop; + + skb_tx_timestamp(skb); + + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + /* Always offload the checksum, since it's in the data descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + tx_flags |= I40E_TX_FLAGS_CSUM; + + i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + } + + i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + cd_tunneling, cd_l2tag2); + + /* Add Flow Director ATR if it's enabled. + * + * NOTE: this must always be directly before the data descriptor. 
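The TSO and timestamp paths above fold a command, the TSO payload length and the MSS into the single cd_type_cmd_tso_mss quadword that i40e_create_tx_ctx writes out. The sketch below packs such a quadword; the shift positions are assumptions standing in for the CTX_QW1 shifts in i40e_type.h, not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* Shift positions assumed for illustration; the real values live in the
 * driver's CTX_QW1_CMD/TSO_LEN/MSS defines. */
#define CTX_CMD_SHIFT      4
#define CTX_TSO_LEN_SHIFT  30
#define CTX_MSS_SHIFT      50
#define CTX_DESC_TSO       0x01u

int main(void)
{
	uint64_t cd = 0;
	uint32_t tso_len = 65000 - 66;   /* payload bytes after the headers */
	uint32_t mss = 1448;

	cd |= (uint64_t)CTX_DESC_TSO << CTX_CMD_SHIFT;
	cd |= (uint64_t)tso_len << CTX_TSO_LEN_SHIFT;
	cd |= (uint64_t)mss << CTX_MSS_SHIFT;

	printf("ctx qword = 0x%016llx (mss read back = %llu)\n",
	       (unsigned long long)cd,
	       (unsigned long long)(cd >> CTX_MSS_SHIFT));
	return 0;
}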
+ */ + i40e_atr(tx_ring, skb, tx_flags, protocol); + + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, I40E_MIN_TX_LEN)) + return NETDEV_TX_OK; + + return i40e_xmit_frame_ring(skb, tx_ring); +} diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h new file mode 100644 index 000000000..4b0b8102c --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -0,0 +1,313 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TXRX_H_ +#define _I40E_TXRX_H_ + +/* Interrupt Throttling and Rate Limiting Goodies */ + +#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ +#define I40E_ITR_100K 0x0005 +#define I40E_ITR_20K 0x0019 +#define I40E_ITR_8K 0x003E +#define I40E_ITR_4K 0x007A +#define I40E_ITR_RX_DEF I40E_ITR_8K +#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ +#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ +#define I40E_DEFAULT_IRQ_WORK 256 +#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) +#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) +#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) + +#define I40E_QUEUE_END_OF_LIST 0x7FF + +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
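The ITR register counts in 2-microsecond units (see ITR_REG_TO_USEC above), so I40E_ITR_8K, value 0x3E or 62, means 124 us between interrupts, roughly the 8000 interrupts per second the name suggests. A quick check of that conversion for the three defines:

#include <stdio.h>

#define ITR_100K 0x0005
#define ITR_20K  0x0019
#define ITR_8K   0x003E
#define ITR_REG_TO_USEC(reg) ((reg) << 1)   /* register is in 2 usec units */

static void show(const char *name, unsigned int reg)
{
	unsigned int usec = ITR_REG_TO_USEC(reg);

	printf("%-8s reg 0x%04x -> %3u usec -> ~%u ints/s\n",
	       name, reg, usec, 1000000u / usec);
}

int main(void)
{
	show("ITR_100K", ITR_100K);
	show("ITR_20K", ITR_20K);
	show("ITR_8K", ITR_8K);
	return 0;
}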
+ */ +enum i40e_dyn_idx_t { + I40E_IDX_ITR0 = 0, + I40E_IDX_ITR1 = 1, + I40E_IDX_ITR2 = 2, + I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR I40E_IDX_ITR0 +#define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_PE_ITR I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +/* Supported Rx Buffer Sizes */ +#define I40E_RXBUFFER_512 512 /* Used for packet split */ +#define I40E_RXBUFFER_2048 2048 +#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ +#define I40E_RXBUFFER_4096 4096 +#define I40E_RXBUFFER_8192 8192 +#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ + +/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, + * this adds up to 512 bytes of extra data meaning the smallest allocation + * we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + +#define I40E_RX_NEXT_DESC(r, i, n) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + (n) = I40E_RX_DESC((r), (i)); \ + } while (0) + +#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ + do { \ + I40E_RX_NEXT_DESC((r), (i), (n)); \ + prefetch((n)); \ + } while (0) + +#define i40e_rx_desc i40e_32byte_rx_desc + +#define I40E_MAX_BUFFER_TXD 8 +#define I40E_MIN_TX_LEN 17 +#define I40E_MAX_DATA_PER_TXD 8192 + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 + +#define I40E_TX_FLAGS_CSUM (u32)(1) +#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) +#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) +#define I40E_TX_FLAGS_TSO (u32)(1 << 3) +#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) +#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) +#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) +#define I40E_TX_FLAGS_FSO (u32)(1 << 7) +#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) +#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 +#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define I40E_TX_FLAGS_VLAN_SHIFT 16 + +struct i40e_tx_buffer { + struct i40e_tx_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; + void *raw_buf; + }; + unsigned int bytecount; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct i40e_rx_buffer { + struct sk_buff *skb; + void *hdr_buf; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; + unsigned int page_offset; +}; + +struct i40e_queue_stats { + u64 packets; + u64 
bytes; +}; + +struct i40e_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct i40e_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buff_failed; +}; + +enum i40e_ring_state_t { + __I40E_TX_FDIR_INIT_DONE, + __I40E_TX_XPS_INIT_DONE, + __I40E_TX_DETECT_HANG, + __I40E_HANG_CHECK_ARMED, + __I40E_RX_PS_ENABLED, + __I40E_RX_16BYTE_DESC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ + test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ + set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ + clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define ring_is_16byte_desc_enabled(ring) \ + test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define set_ring_16byte_desc_enabled(ring) \ + set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define clear_ring_16byte_desc_enabled(ring) \ + clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) + +/* struct that defines a descriptor ring, associated with a VSI */ +struct i40e_ring { + struct i40e_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + union { + struct i40e_tx_buffer *tx_bi; + struct i40e_rx_buffer *rx_bi; + }; + unsigned long state; + u16 queue_index; /* Queue number of ring */ + u8 dcb_tc; /* Traffic class of ring */ + u8 __iomem *tail; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 rx_hdr_len; + u16 rx_buf_len; + u8 dtype; +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 + u8 hsplit; +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + u8 atr_sample_rate; + u8 atr_count; + + unsigned long last_rx_timestamp; + + bool ring_active; /* is ring online or not */ + bool arm_wb; /* do something to arm write back */ + + /* stats structs */ + struct i40e_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct i40e_tx_queue_stats tx_stats; + struct i40e_rx_queue_stats rx_stats; + }; + + unsigned int size; /* length of descriptor ring in bytes */ + dma_addr_t dma; /* physical address of ring */ + + struct i40e_vsi *vsi; /* Backreference to associated VSI */ + struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + + struct rcu_head rcu; /* to avoid race on free */ +} ____cacheline_internodealigned_in_smp; + +enum i40e_latency_range { + I40E_LOWEST_LATENCY = 0, + I40E_LOW_LATENCY = 1, + I40E_BULK_LATENCY = 2, +}; + +struct i40e_ring_container { + /* array of pointers to rings */ + struct i40e_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; + enum i40e_latency_range latency_range; + u16 itr; +}; + +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 
cleaned_count); +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_headers(struct i40e_ring *rxr); +netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void i40e_clean_tx_ring(struct i40e_ring *tx_ring); +void i40e_clean_rx_ring(struct i40e_ring *rx_ring); +int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring); +int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring); +void i40e_free_tx_resources(struct i40e_ring *tx_ring); +void i40e_free_rx_resources(struct i40e_ring *rx_ring); +int i40e_napi_poll(struct napi_struct *napi, int budget); +#ifdef I40E_FCOE +void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset); +int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); +int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring); +int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, u32 *flags); +#endif +#endif /* _I40E_TXRX_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h new file mode 100644 index 000000000..568e855da --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -0,0 +1,1421 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) (mask << shift) + +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 + +/* Max default timeout in ms, */ +#define I40E_MAX_NVM_TIMEOUT 18000 + +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time) ((time) * 1000) + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. 
+ */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_X710, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_set_fc_aq_failures { + I40E_SET_FC_AQ_FAIL_NONE = 0, + I40E_SET_FC_AQ_FAIL_GET = 1, + I40E_SET_FC_AQ_FAIL_SET = 2, + I40E_SET_FC_AQ_FAIL_UPDATE = 4, + I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1, + I40E_VSI_VMDQ2, + I40E_VSI_CTRL, + I40E_VSI_FCOE, + I40E_VSI_MIRROR, + I40E_VSI_SRIOV, + I40E_VSI_FDIR, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; + u16 max_frame_size; + bool crc_enable; + u8 pacing; + u8 requested_speeds; +}; + +struct i40e_phy_info { + struct i40e_link_status link_info; + struct i40e_link_status link_info_old; + u32 autoneg_advertised; + u32 phy_id; + u32 module_type; + bool get_link_info; + enum i40e_media_type media_type; +}; + +#define I40E_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + u32 management_mode; + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool iscsi; /* Indicates iSCSI enabled */ + bool mfp_mode_1; + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; + u64 wr_csr_prot; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ +}; + +/* definitions used in NVM update support */ + +enum i40e_nvmupd_cmd { + I40E_NVMUPD_INVALID, + I40E_NVMUPD_READ_CON, + I40E_NVMUPD_READ_SNT, + I40E_NVMUPD_READ_LCB, + I40E_NVMUPD_READ_SA, + 
I40E_NVMUPD_WRITE_ERA, + I40E_NVMUPD_WRITE_CON, + I40E_NVMUPD_WRITE_SNT, + I40E_NVMUPD_WRITE_LCB, + I40E_NVMUPD_WRITE_SA, + I40E_NVMUPD_CSUM_CON, + I40E_NVMUPD_CSUM_SA, + I40E_NVMUPD_CSUM_LCB, +}; + +enum i40e_nvmupd_state { + I40E_NVMUPD_STATE_INIT, + I40E_NVMUPD_STATE_READING, + I40E_NVMUPD_STATE_WRITING +}; + +/* nvm_access definition and its masks/shifts need to be accessible to + * application, core driver, and shared code. Where is the right file? + */ +#define I40E_NVM_READ 0xB +#define I40E_NVM_WRITE 0xC + +#define I40E_NVM_MOD_PNT_MASK 0xFF + +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 + +#define I40E_NVM_ADAPT_SHIFT 16 +#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) + +#define I40E_NVMUPD_MAX_DATA 4096 +#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ + +struct i40e_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 0x2 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 + u32 numapps; + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct 
i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 __iomem *hw_addr; + void *back; + + /* subsystem structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* for multi-function MACs */ + u16 partition_id; + u16 num_partitions; + u16 num_ports; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* state of nvm update process */ + enum i40e_nvmupd_state nvmupd_state; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; + struct i40e_dcbx_config remote_dcbx_config; + + /* debug mask */ + u32 debug_mask; +}; + +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + 
I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ + << I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ + I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + 
I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ + I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define 
I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + 
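[Editor's note: the short sketch below is illustrative only and is not part of the imported driver patch. It shows one plausible way the data-descriptor QW1 shift/mask definitions above are combined when a driver fills the cmd_type_offset_bsz quadword of struct i40e_tx_desc; the helper name is hypothetical. The context descriptor's type_cmd_tso_mss quadword is packed the same way using the I40E_TXD_CTX_QW1_* shifts.]

/* Illustrative sketch (not part of this patch): pack the second quadword
 * of a TX data descriptor from the QW1 shift definitions above.
 * Field widths follow the corresponding I40E_TXD_QW1_*_MASK values.
 */
static inline __le64 build_txd_qw1(u32 td_cmd, u32 td_offset,
				   unsigned int size, u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd    << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size      << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag    << I40E_TXD_QW1_L2TAG1_SHIFT));
}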
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ + I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +struct i40e_filter_program_desc { + __le32 qindex_flex_ptype_vsi; + __le32 rsvd; + __le32 dtype_cmd_cntindex; + __le32 fd_id; +}; +#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 +#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) +#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 +#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) +#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 +#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { + /* Note: Values 0-30 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-40 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +enum i40e_filter_program_desc_dest { + I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, +}; + +enum i40e_filter_program_desc_fd_status { + I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, +}; + +#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) + +#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 +#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) + +#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) + +enum i40e_filter_program_desc_pcmd { + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, +}; + +#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + 
I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ + I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) + +#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 +#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) + +enum i40e_filter_type { + I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +#ifdef I40E_FCOE +/* Statistics collected per function for FCoE */ +struct i40e_fcoe_stats { + u64 rx_fcoe_packets; /* fcoeprc */ + u64 rx_fcoe_dwords; /* focedwrc */ + u64 rx_fcoe_dropped; /* fcoerpdc */ + u64 tx_fcoe_packets; /* fcoeptc */ + u64 tx_fcoe_dwords; /* focedwtc */ + u64 fcoe_bad_fccrc; /* fcoecrc */ + u64 fcoe_last_error; /* fcoelast */ + u64 fcoe_ddp_count; /* fcoeddpc */ +}; + +/* offset to per function FCoE statistics block */ +#define I40E_FCOE_VF_STAT_OFFSET 0 +#define I40E_FCOE_PF_STAT_OFFSET 128 +#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF) + +#endif +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* flow director 
stats */ + u64 fd_atr_match; + u64 fd_sb_match; + /* EEE LPI */ + u32 tx_lpi_status; + u32 rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_SR_PBA_FLAGS 0x15 +#define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) + +/* Shadow RAM related */ +#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. + */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +#ifdef I40E_FCOE +/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */ + +enum i40E_fcoe_tx_ctx_desc_cmd_bits { + I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10, + I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20, + I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40, + I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80 +}; + +/* FCoE DDP Context descriptor */ +struct i40e_fcoe_ddp_context_desc { + __le64 rsvd; + __le64 type_cmd_foff_lsize; +}; + +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4 +#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) + +enum i40e_fcoe_ddp_ctx_desc_cmd_bits { + I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */ + I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */ +}; + +#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16 +#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32 +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) + +/* FCoE DDP/DWO Queue Context descriptor */ +struct i40e_fcoe_queue_context_desc { + __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */ + __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */ +}; + +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \ + 
I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12 +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \ + I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13 +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +enum i40e_fcoe_queue_ctx_desc_tph_bits { + I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1, + I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2 +}; + +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30 +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT) + +/* FCoE DDP/DWO Filter Context descriptor */ +struct i40e_fcoe_filter_context_desc { + __le32 param; + __le16 seqn; + + /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */ + __le16 rsvd_dmaindx; + + /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */ + __le64 flags_rsvd_lanq; +}; + +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4 +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \ + I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT) + +enum i40e_fcoe_filter_ctx_desc_flags_bits { + I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00, + I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01, + I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00, + I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04 +}; + +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0 +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \ + I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \ + I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53 +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \ + I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT) + +#endif /* I40E_FCOE */ +enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + 
I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets */ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; + +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0xD +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + +/* RSS Hash Table Size */ +#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 +#endif /* _I40E_TYPE_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h new file mode 100644 index 000000000..2d20af290 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -0,0 +1,362 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_VIRTCHNL_H_ +#define _I40E_VIRTCHNL_H_ + +#include "i40e_type.h" + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the various i40e drivers. + * + * Admin queue buffer usage: + * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * Firmware copies the cookie fields when sending messages between the PF and + * VF, but uses all other fields internally. Due to this limitation, we + * must send all messages as "indirect", i.e. using an external buffer. + * + * All the vsi indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value is of + * i40e_status_code type, defined in the i40e_type.h. + * + * In general, VF driver initialization should roughly follow the order of these + * opcodes. The VF driver must first validate the API version of the PF driver, + * then request a reset, then get resources, then configure queues and + * interrupts. After these operations are complete, the VF driver may start + * its queues, optionally add MAC and VLAN filters, and process traffic. + */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum i40e_virtchnl_ops { +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + */ + I40E_VIRTCHNL_OP_UNKNOWN = 0, + I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, +}; + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct i40e_virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + i40e_status v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +/* Message descriptions and data structures.*/ + +/* I40E_VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. 
+ * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define I40E_VIRTCHNL_VERSION_MAJOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR 0 +struct i40e_virtchnl_version_info { + u32 major; + u32 minor; +}; + +/* I40E_VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES + * VF sends this request to PF with no parameters + * PF responds with an indirect message containing + * i40e_virtchnl_vf_resource and one or more + * i40e_virtchnl_vsi_resource structures. + */ + +struct i40e_virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum i40e_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; +/* VF offload flags */ +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 + +struct i40e_virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 max_fcoe_contexts; + u32 max_fcoe_filters; + + struct i40e_virtchnl_vsi_resource vsi_res[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of i40e_virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct i40e_virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; + u64 dma_ring_addr; + u64 dma_headwb_addr; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of i40e_virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct i40e_virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; + u32 databuffer_size; + u32 max_pkt_size; + u64 dma_ring_addr; + enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct i40e_virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. 
*/ + struct i40e_virtchnl_txq_info txq; + struct i40e_virtchnl_rxq_info rxq; +}; + +struct i40e_virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + struct i40e_virtchnl_queue_pair_info qpair[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct i40e_virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +struct i40e_virtchnl_irq_map_info { + u16 num_vectors; + struct i40e_virtchnl_vector_map vecmap[1]; +}; + +/* I40E_VIRTCHNL_OP_ENABLE_QUEUES + * I40E_VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct i40e_virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct i40e_virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +struct i40e_virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct i40e_virtchnl_ether_addr list[1]; +}; + +/* I40E_VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* I40E_VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct i40e_virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct i40e_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 +#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* I40E_VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct i40e_eth_stats in an external buffer. + */ + +/* I40E_VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. 
+ */ +enum i40e_virtchnl_event_codes { + I40E_VIRTCHNL_EVENT_UNKNOWN = 0, + I40E_VIRTCHNL_EVENT_LINK_CHANGE, + I40E_VIRTCHNL_EVENT_RESET_IMPENDING, + I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; +#define I40E_PF_EVENT_SEVERITY_INFO 0 +#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct i40e_virtchnl_pf_event { + enum i40e_virtchnl_event_codes event; + union { + struct { + enum i40e_aq_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +/* VF reset states - these are written into the RSTAT register: + * I40E_VFGEN_RSTAT1 on the PF + * I40E_VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum i40e_vfr_states { + I40E_VFR_INPROGRESS = 0, + I40E_VFR_COMPLETED, + I40E_VFR_VFACTIVE, + I40E_VFR_UNKNOWN, +}; + +#endif /* _I40E_VIRTCHNL_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c new file mode 100644 index 000000000..4e9376da0 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -0,0 +1,2402 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" + +/*********************notification routines***********************/ + +/** + * i40e_vc_vf_broadcast + * @pf: pointer to the PF structure + * @opcode: operation code + * @retval: return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send a message to all VFs on a given PF + **/ +static void i40e_vc_vf_broadcast(struct i40e_pf *pf, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, + u16 msglen) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + continue; + + /* Ignore return value on purpose - a given VF may fail, but + * we need to keep going and send to all of them + */ + i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + msg, msglen, NULL); + } +} + +/** + * i40e_vc_notify_link_state + * @vf: pointer to the VF structure + * + * send a link status message to a single VF + **/ +static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) +{ + struct i40e_virtchnl_pf_event pfe; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); + } else { + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = ls->link_speed; + } + i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); +} + +/** + * i40e_vc_notify_link_state + * @pf: pointer to the PF structure + * + * send a link status message to all VFs on a given PF + **/ +void i40e_vc_notify_link_state(struct i40e_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + i40e_vc_notify_vf_link_state(&pf->vf[i]); +} + +/** + * i40e_vc_notify_reset + * @pf: pointer to the PF structure + * + * indicate a pending reset to all VFs on a given PF + **/ +void i40e_vc_notify_reset(struct i40e_pf *pf) +{ + struct i40e_virtchnl_pf_event pfe; + + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); +} + +/** + * i40e_vc_notify_vf_reset + * @vf: pointer to the VF structure + * + * indicate a pending reset to the given VF + **/ +void i40e_vc_notify_vf_reset(struct i40e_vf *vf) +{ + struct i40e_virtchnl_pf_event pfe; + int abs_vf_id; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return; + + /* verify if the VF is in either init or active before proceeding */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + return; + + abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, + sizeof(struct i40e_virtchnl_pf_event), NULL); +} +/***********************misc routines*****************************/ + +/** + * i40e_vc_disable_vf + * @pf: pointer to the PF info + * @vf: pointer to the VF info + * + * Disable the VF through a SW reset + **/ +static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) +{ + struct i40e_hw *hw = &pf->hw; + u32 reg; + + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + i40e_flush(hw); +} + +/** + * i40e_vc_isvalid_vsi_id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id + * + * check for the valid VSI id + **/ +static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); + + return (vsi && (vsi->vf_id == vf->vf_id)); +} + +/** + * i40e_vc_isvalid_queue_id + * @vf: pointer to the VF info + * @vsi_id: vsi id + * @qid: vsi relative queue id + * + * check for the valid queue id + **/ +static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, + u8 qid) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); + + return (vsi && (qid < vsi->alloc_queue_pairs)); +} + +/** + * i40e_vc_isvalid_vector_id + * @vf: pointer to the VF info + * @vector_id: VF relative vector id + * + * check for the valid vector id + **/ +static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) +{ + struct i40e_pf *pf = vf->pf; + + return vector_id < pf->hw.func_caps.num_msix_vectors_vf; +} + +/***********************vf resource mgmt routines*****************/ + +/** + * i40e_vc_get_pf_queue_id + * @vf: pointer to the VF info + 
* @vsi_id: id of VSI as provided by the FW + * @vsi_queue_id: vsi relative queue id + * + * return PF relative queue id + **/ +static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, + u8 vsi_queue_id) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); + u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; + + if (!vsi) + return pf_queue_id; + + if (le16_to_cpu(vsi->info.mapping_flags) & + I40E_AQ_VSI_QUE_MAP_NONCONTIG) + pf_queue_id = + le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); + else + pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + + vsi_queue_id; + + return pf_queue_id; +} + +/** + * i40e_config_irq_link_list + * @vf: pointer to the VF info + * @vsi_id: id of VSI as given by the FW + * @vecmap: irq map info + * + * configure irq link list from the map + **/ +static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, + struct i40e_virtchnl_vector_map *vecmap) +{ + unsigned long linklistmap = 0, tempmap; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u16 vsi_queue_id, pf_queue_id; + enum i40e_queue_type qtype; + u16 next_q, vector_id; + u32 reg, reg_idx; + u16 itr_idx = 0; + + vector_id = vecmap->vector_id; + /* setup the head */ + if (0 == vector_id) + reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); + else + reg_idx = I40E_VPINT_LNKLSTN( + ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + + (vector_id - 1)); + + if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { + /* Special case - No queues mapped on this vector */ + wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); + goto irq_list_done; + } + tempmap = vecmap->rxq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + linklistmap |= (1 << + (I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id)); + } + + tempmap = vecmap->txq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + linklistmap |= (1 << + (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id + + 1)); + } + + next_q = find_first_bit(&linklistmap, + (I40E_MAX_VSI_QP * + I40E_VIRTCHNL_SUPPORTED_QTYPES)); + vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); + + wr32(hw, reg_idx, reg); + + while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + switch (qtype) { + case I40E_QUEUE_TYPE_RX: + reg_idx = I40E_QINT_RQCTL(pf_queue_id); + itr_idx = vecmap->rxitr_idx; + break; + case I40E_QUEUE_TYPE_TX: + reg_idx = I40E_QINT_TQCTL(pf_queue_id); + itr_idx = vecmap->txitr_idx; + break; + default: + break; + } + + next_q = find_next_bit(&linklistmap, + (I40E_MAX_VSI_QP * + I40E_VIRTCHNL_SUPPORTED_QTYPES), + next_q + 1); + if (next_q < + (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, + vsi_queue_id); + } else { + pf_queue_id = I40E_QUEUE_END_OF_LIST; + qtype = 0; + } + + /* format for the RQCTL & TQCTL regs is same */ + reg = (vector_id) | + (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); + wr32(hw, reg_idx, reg); + } + +irq_list_done: + i40e_flush(hw); +} + +/** + * i40e_config_vsi_tx_queue + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW 
+ * @vsi_queue_id: vsi relative queue index + * @info: config. info + * + * configure tx queue + **/ +static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, + u16 vsi_queue_id, + struct i40e_virtchnl_txq_info *info) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_hmc_obj_txq tx_ctx; + struct i40e_vsi *vsi; + u16 pf_queue_id; + u32 qtx_ctl; + int ret = 0; + + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + vsi = i40e_find_vsi_from_id(pf, vsi_id); + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); + + /* only set the required fields */ + tx_ctx.base = info->dma_ring_addr / 128; + tx_ctx.qlen = info->ring_len; + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); + tx_ctx.rdylist_act = 0; + tx_ctx.head_wb_ena = info->headwb_enabled; + tx_ctx.head_wb_addr = info->dma_headwb_addr; + + /* clear the context in the HMC */ + ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to clear VF LAN Tx queue context %d, error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_context; + } + + /* set the context in the HMC */ + ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to set VF LAN Tx queue context %d error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_context; + } + + /* associate this queue with the PCI VF function */ + qtx_ctl = I40E_QTX_CTL_VF_QUEUE; + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) + & I40E_QTX_CTL_PF_INDX_MASK); + qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) + << I40E_QTX_CTL_VFVM_INDX_SHIFT) + & I40E_QTX_CTL_VFVM_INDX_MASK); + wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); + i40e_flush(hw); + +error_context: + return ret; +} + +/** + * i40e_config_vsi_rx_queue + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW + * @vsi_queue_id: vsi relative queue index + * @info: config. info + * + * configure rx queue + **/ +static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, + u16 vsi_queue_id, + struct i40e_virtchnl_rxq_info *info) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_hmc_obj_rxq rx_ctx; + u16 pf_queue_id; + int ret = 0; + + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + + /* clear the context structure first */ + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + + /* only set the required fields */ + rx_ctx.base = info->dma_ring_addr / 128; + rx_ctx.qlen = info->ring_len; + + if (info->splithdr_enabled) { + rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | + I40E_RX_SPLIT_IP | + I40E_RX_SPLIT_TCP_UDP | + I40E_RX_SPLIT_SCTP; + /* header length validation */ + if (info->hdr_size > ((2 * 1024) - 64)) { + ret = -EINVAL; + goto error_param; + } + rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; + + /* set splitalways mode 10b */ + rx_ctx.dtype = 0x2; + } + + /* databuffer length validation */ + if (info->databuffer_size > ((16 * 1024) - 128)) { + ret = -EINVAL; + goto error_param; + } + rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; + + /* max pkt. 
length validation */ + if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { + ret = -EINVAL; + goto error_param; + } + rx_ctx.rxmax = info->max_pkt_size; + + /* enable 32bytes desc always */ + rx_ctx.dsize = 1; + + /* default values */ + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = 1; + rx_ctx.prefena = 1; + rx_ctx.l2tsel = 1; + + /* clear the context in the HMC */ + ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to clear VF LAN Rx queue context %d, error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_param; + } + + /* set the context in the HMC */ + ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to set VF LAN Rx queue context %d error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_param; + } + +error_param: + return ret; +} + +/** + * i40e_alloc_vsi_res + * @vf: pointer to the VF info + * @type: type of VSI to allocate + * + * alloc VF vsi context & resources + **/ +static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) +{ + struct i40e_mac_filter *f = NULL; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi; + int ret = 0; + + vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); + + if (!vsi) { + dev_err(&pf->pdev->dev, + "add vsi failed for VF %d, aq_err %d\n", + vf->vf_id, pf->hw.aq.asq_last_status); + ret = -ENOENT; + goto error_alloc_vsi_res; + } + if (type == I40E_VSI_SRIOV) { + u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_id = vsi->id; + /* If the port VLAN has been configured and then the + * VF driver was removed then the VSI port VLAN + * configuration was destroyed. Check if there is + * a port VLAN and restore the VSI configuration if + * needed. + */ + if (vf->port_vlan_id) + i40e_vsi_add_pvid(vsi, vf->port_vlan_id); + f = i40e_add_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id, true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not allocate VF MAC addr\n"); + f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not allocate VF broadcast filter\n"); + } + + /* program mac filter */ + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); + + /* Set VF bandwidth if specified */ + if (vf->tx_rate) { + ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, + vf->tx_rate / 50, 0, NULL); + if (ret) + dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", + vf->vf_id, ret); + } + +error_alloc_vsi_res: + return ret; +} + +/** + * i40e_enable_vf_mappings + * @vf: pointer to the VF info + * + * enable VF mappings + **/ +static void i40e_enable_vf_mappings(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg, total_queue_pairs = 0; + int j; + + /* Tell the hardware we're using noncontiguous mapping. 
HW requires + * that VF queues be mapped using this method, even when they are + * contiguous in real life + */ + wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + + /* enable VF vplan_qtable mappings */ + reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; + wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); + + /* map PF queues to VF queues */ + for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); + reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); + wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); + total_queue_pairs++; + } + + /* map PF queues to VSI */ + for (j = 0; j < 7; j++) { + if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { + reg = 0x07FF07FF; /* unused */ + } else { + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, + j * 2); + reg = qid; + qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, + (j * 2) + 1); + reg |= qid << 16; + } + wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); + } + + i40e_flush(hw); +} + +/** + * i40e_disable_vf_mappings + * @vf: pointer to the VF info + * + * disable VF mappings + **/ +static void i40e_disable_vf_mappings(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + int i; + + /* disable qp mappings */ + wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); + for (i = 0; i < I40E_MAX_VSI_QP; i++) + wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), + I40E_QUEUE_END_OF_LIST); + i40e_flush(hw); +} + +/** + * i40e_free_vf_res + * @vf: pointer to the VF info + * + * free VF resources + **/ +static void i40e_free_vf_res(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg_idx, reg; + int i, msix_vf; + + /* free vsi & disconnect it from the parent uplink */ + if (vf->lan_vsi_idx) { + i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; + vf->lan_vsi_id = 0; + } + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + + /* disable interrupts so the VF starts in a known state */ + for (i = 0; i < msix_vf; i++) { + /* format is same for both registers */ + if (0 == i) + reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); + else + reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * + (vf->vf_id)) + + (i - 1)); + wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + i40e_flush(hw); + } + + /* clear the irq settings */ + for (i = 0; i < msix_vf; i++) { + /* format is same for both registers */ + if (0 == i) + reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); + else + reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * + (vf->vf_id)) + + (i - 1)); + reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); + wr32(hw, reg_idx, reg); + i40e_flush(hw); + } + /* reset some of the state varibles keeping + * track of the resources + */ + vf->num_queue_pairs = 0; + vf->vf_states = 0; +} + +/** + * i40e_alloc_vf_res + * @vf: pointer to the VF info + * + * allocate VF resources + **/ +static int i40e_alloc_vf_res(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + int total_queue_pairs = 0; + int ret; + + /* allocate hw vsi context & associated resources */ + ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); + if (ret) + goto error_alloc; + total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + /* store the total qps number for the runtime + * VF req validation + */ + vf->num_queue_pairs = total_queue_pairs; + + /* VF is now completely initialized */ + set_bit(I40E_VF_STAT_INIT, &vf->vf_states); + +error_alloc: + 
if (ret) + i40e_free_vf_res(vf); + + return ret; +} + +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_MASK 0x20 +/** + * i40e_quiesce_vf_pci + * @vf: pointer to the VF structure + * + * Wait for VF PCI transactions to be cleared after reset. Returns -EIO + * if the transactions never clear. + **/ +static int i40e_quiesce_vf_pci(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + int vf_abs_id, i; + u32 reg; + + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + wr32(hw, I40E_PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); + for (i = 0; i < 100; i++) { + reg = rd32(hw, I40E_PF_PCI_CIAD); + if ((reg & VF_TRANS_PENDING_MASK) == 0) + return 0; + udelay(1); + } + return -EIO; +} + +/** + * i40e_reset_vf + * @vf: pointer to the VF structure + * @flr: VFLR was issued or not + * + * reset the VF + **/ +void i40e_reset_vf(struct i40e_vf *vf, bool flr) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + bool rsd = false; + int i; + u32 reg; + + if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + return; + + /* warn the VF */ + clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. + */ + if (!flr) { + /* reset VF using VPGEN_VFRTRIG reg */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + i40e_flush(hw); + } + + if (i40e_quiesce_vf_pci(vf)) + dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", + vf->vf_id); + + /* poll VPGEN_VFRSTAT reg to make sure + * that reset is complete + */ + for (i = 0; i < 10; i++) { + /* VF reset requires driver to first reset the VF and then + * poll the status register to make sure that the reset + * completed successfully. Due to internal HW FIFO flushes, + * we must wait 10ms before the register will be valid. 
+ */ + usleep_range(10000, 20000); + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { + rsd = true; + break; + } + } + + if (flr) + usleep_range(10000, 20000); + + if (!rsd) + dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", + vf->vf_id); + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); + /* clear the reset bit in the VPGEN_VFRTRIG reg */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + + /* On initial reset, we won't have any queues */ + if (vf->lan_vsi_idx == 0) + goto complete_reset; + + i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); +complete_reset: + /* reallocate VF resources to reset the VSI state */ + i40e_free_vf_res(vf); + i40e_alloc_vf_res(vf); + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + + /* tell the VF the reset is done */ + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + i40e_flush(hw); + clear_bit(__I40E_VF_DISABLE, &pf->state); +} + +/** + * i40e_free_vfs + * @pf: pointer to the PF structure + * + * free VF resources + **/ +void i40e_free_vfs(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 reg_idx, bit_idx; + int i, tmp, vf_id; + + if (!pf->vf) + return; + while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + usleep_range(1000, 2000); + + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + false); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + + msleep(20); /* let any messages in transit get finished up */ + + /* free up VF resources */ + tmp = pf->num_alloc_vfs; + pf->num_alloc_vfs = 0; + for (i = 0; i < tmp; i++) { + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_free_vf_res(&pf->vf[i]); + /* disable qp mappings */ + i40e_disable_vf_mappings(&pf->vf[i]); + } + + kfree(pf->vf); + pf->vf = NULL; + + /* This check is for when the driver is unloaded while VFs are + * assigned. Setting the number of VFs to 0 through sysfs is caught + * before this function ever gets called. + */ + if (!pci_vfs_assigned(pf->pdev)) { + /* Acknowledge VFLR for all VFS. Without this, VFs will fail to + * work correctly when SR-IOV gets re-enabled. + */ + for (vf_id = 0; vf_id < tmp; vf_id++) { + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + } + } + clear_bit(__I40E_VF_DISABLE, &pf->state); +} + +#ifdef CONFIG_PCI_IOV +/** + * i40e_alloc_vfs + * @pf: pointer to the PF structure + * @num_alloc_vfs: number of VFs to allocate + * + * allocate VF resources + **/ +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) +{ + struct i40e_vf *vfs; + int i, ret = 0; + + /* Disable interrupt 0 so we don't try to handle the VFLR. 
*/ + i40e_irq_dynamic_disable_icr0(pf); + + /* Check to see if we're just allocating resources for extant VFs */ + if (pci_num_vf(pf->pdev) != num_alloc_vfs) { + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to enable SR-IOV, error %d.\n", ret); + pf->num_alloc_vfs = 0; + goto err_iov; + } + } + /* allocate memory */ + vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); + if (!vfs) { + ret = -ENOMEM; + goto err_alloc; + } + pf->vf = vfs; + + /* apply default profile */ + for (i = 0; i < num_alloc_vfs; i++) { + vfs[i].pf = pf; + vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; + vfs[i].vf_id = i; + + /* assign default capabilities */ + set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); + vfs[i].spoofchk = true; + /* VF resources get allocated during reset */ + i40e_reset_vf(&vfs[i], false); + + /* enable VF vplan_qtable mappings */ + i40e_enable_vf_mappings(&vfs[i]); + } + pf->num_alloc_vfs = num_alloc_vfs; + +err_alloc: + if (ret) + i40e_free_vfs(pf); +err_iov: + /* Re-enable interrupt 0. */ + i40e_irq_dynamic_enable_icr0(pf); + return ret; +} + +#endif +/** + * i40e_pci_sriov_enable + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs + **/ +static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct i40e_pf *pf = pci_get_drvdata(pdev); + int pre_existing_vfs = pci_num_vf(pdev); + int err = 0; + + dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + i40e_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (num_vfs > pf->num_req_vfs) { + err = -EPERM; + goto err_out; + } + + err = i40e_alloc_vfs(pf, num_vfs); + if (err) { + dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); + goto err_out; + } + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +/** + * i40e_pci_sriov_configure + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. 
+ **/ +int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + if (num_vfs) { + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + return i40e_pci_sriov_enable(pdev, num_vfs); + } + + if (!pci_vfs_assigned(pf->pdev)) { + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } else { + dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); + return -EINVAL; + } + return 0; +} + +/***********************virtual channel routines******************/ + +/** + * i40e_vc_send_msg_to_vf + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + **/ +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + struct i40e_pf *pf; + struct i40e_hw *hw; + int abs_vf_id; + i40e_status aq_ret; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return -EINVAL; + + pf = vf->pf; + hw = &pf->hw; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* single place to detect unsuccessful return values */ + if (v_retval) { + vf->num_invalid_msgs++; + dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", + v_opcode, v_retval); + if (vf->num_invalid_msgs > + I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { + dev_err(&pf->pdev->dev, + "Number of invalid messages exceeded for VF %d\n", + vf->vf_id); + dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); + set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } + } else { + vf->num_valid_msgs++; + } + + aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret) { + dev_err(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.aq.asq_last_status); + return -EIO; + } + + return 0; +} + +/** + * i40e_vc_send_resp_to_vf + * @vf: pointer to the VF info + * @opcode: operation code + * @retval: return value + * + * send resp msg to VF + **/ +static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, + enum i40e_virtchnl_ops opcode, + i40e_status retval) +{ + return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); +} + +/** + * i40e_vc_get_version_msg + * @vf: pointer to the VF info + * + * called from the VF to request the API version used by the PF + **/ +static int i40e_vc_get_version_msg(struct i40e_vf *vf) +{ + struct i40e_virtchnl_version_info info = { + I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR + }; + + return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + I40E_SUCCESS, (u8 *)&info, + sizeof(struct + i40e_virtchnl_version_info)); +} + +/** + * i40e_vc_get_vf_resources_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to request its resources + **/ +static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) +{ + struct i40e_virtchnl_vf_resource *vfres = NULL; + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + struct i40e_vsi *vsi; + int i = 0, len = 0; + int num_vsis = 1; + int ret; + + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + len = (sizeof(struct i40e_virtchnl_vf_resource) + + sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); + + vfres = 
kzalloc(len, GFP_KERNEL); + if (!vfres) { + aq_ret = I40E_ERR_NO_MEMORY; + len = 0; + goto err; + } + + vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi->info.pvid) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + + vfres->num_vsis = num_vsis; + vfres->num_queue_pairs = vf->num_queue_pairs; + vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; + if (vf->lan_vsi_idx) { + vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; + vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; + vfres->vsi_res[i].num_queue_pairs = + pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + memcpy(vfres->vsi_res[i].default_mac_addr, + vf->default_lan_addr.addr, ETH_ALEN); + i++; + } + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + +err: + /* send the response back to the VF */ + ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + aq_ret, (u8 *)vfres, len); + + kfree(vfres); + return ret; +} + +/** + * i40e_vc_reset_vf_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF + **/ +static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) +{ + if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + i40e_reset_vf(vf, false); +} + +/** + * i40e_vc_config_promiscuous_mode_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to configure the promiscuous mode of + * VF vsis + **/ +static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, + u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_promisc_info *info = + (struct i40e_virtchnl_promisc_info *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_vsi *vsi; + bool allmulti = false; + i40e_status aq_ret; + + vsi = i40e_find_vsi_from_id(pf, info->vsi_id); + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || + (vsi->type != I40E_VSI_FCOE)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) + allmulti = true; + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + allmulti, NULL); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + aq_ret); +} + +/** + * i40e_vc_config_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to configure the rx/tx + * queues + **/ +static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_vsi_queue_config_info *qci = + (struct i40e_virtchnl_vsi_queue_config_info *)msg; + struct i40e_virtchnl_queue_pair_info *qpi; + struct i40e_pf *pf = vf->pf; + u16 vsi_id, vsi_queue_id; + i40e_status aq_ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + vsi_id = qci->vsi_id; + if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + for (i = 0; i < qci->num_queue_pairs; i++) { + qpi = &qci->qpair[i]; + vsi_queue_id = qpi->txq.queue_id; + if ((qpi->txq.vsi_id != vsi_id) || + (qpi->rxq.vsi_id != vsi_id) || + (qpi->rxq.queue_id != vsi_queue_id) || + !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { + aq_ret = I40E_ERR_PARAM; + goto 
error_param; + } + + if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, + &qpi->rxq) || + i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, + &qpi->txq)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + /* set vsi num_queue_pairs in use to num configured by VF */ + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + aq_ret); +} + +/** + * i40e_vc_config_irq_map_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to configure the irq to + * queue map + **/ +static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_irq_map_info *irqmap_info = + (struct i40e_virtchnl_irq_map_info *)msg; + struct i40e_virtchnl_vector_map *map; + u16 vsi_id, vsi_queue_id, vector_id; + i40e_status aq_ret = 0; + unsigned long tempmap; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < irqmap_info->num_vectors; i++) { + map = &irqmap_info->vecmap[i]; + + vector_id = map->vector_id; + vsi_id = map->vsi_id; + /* validate msg params */ + if (!i40e_vc_isvalid_vector_id(vf, vector_id) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + /* lookout for the invalid queue index */ + tempmap = map->rxq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, + vsi_queue_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + tempmap = map->txq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, + vsi_queue_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + i40e_config_irq_link_list(vf, vsi_id, map); + } +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + aq_ret); +} + +/** + * i40e_vc_enable_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to enable all or specific queue(s) + **/ +static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_queue_select *vqs = + (struct i40e_virtchnl_queue_select *)msg; + struct i40e_pf *pf = vf->pf; + u16 vsi_id = vqs->vsi_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) + aq_ret = I40E_ERR_TIMEOUT; +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + aq_ret); +} + +/** + * i40e_vc_disable_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to disable all or specific + * queue(s) + **/ +static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_queue_select *vqs = + (struct i40e_virtchnl_queue_select *)msg; + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + 
aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) + aq_ret = I40E_ERR_TIMEOUT; + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + aq_ret); +} + +/** + * i40e_vc_get_stats_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to get vsi stats + **/ +static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_queue_select *vqs = + (struct i40e_virtchnl_queue_select *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_eth_stats stats; + i40e_status aq_ret = 0; + struct i40e_vsi *vsi; + + memset(&stats, 0, sizeof(struct i40e_eth_stats)); + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + i40e_update_eth_stats(vsi); + stats = vsi->eth_stats; + +error_param: + /* send the response back to the VF */ + return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, + (u8 *)&stats, sizeof(stats)); +} + +/** + * i40e_check_vf_permission + * @vf: pointer to the VF info + * @macaddr: pointer to the MAC Address being checked + * + * Check if the VF has permission to add or delete unicast MAC address + * filters and return error code -EPERM if not. Then check if the + * address filter requested is broadcast or zero and if so return + * an invalid MAC address error code. + **/ +static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) +{ + struct i40e_pf *pf = vf->pf; + int ret = 0; + + if (is_broadcast_ether_addr(macaddr) || + is_zero_ether_addr(macaddr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); + ret = I40E_ERR_INVALID_MAC_ADDR; + } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && + !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { + /* If the host VMM administrator has set the VF MAC address + * administratively via the ndo_set_vf_mac command then deny + * permission to the VF to add or delete unicast MAC addresses. + * The VF may request to set the MAC address filter already + * assigned to it so do not return an error in that case. 
+ */ + dev_err(&pf->pdev->dev, + "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); + ret = -EPERM; + } + return ret; +} + +/** + * i40e_vc_add_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * add guest mac address filter + **/ +static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_ether_addr_list *al = + (struct i40e_virtchnl_ether_addr_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = al->vsi_id; + i40e_status ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < al->num_elements; i++) { + ret = i40e_check_vf_permission(vf, al->list[i].addr); + if (ret) + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; + + /* add new addresses to the list */ + for (i = 0; i < al->num_elements; i++) { + struct i40e_mac_filter *f; + + f = i40e_find_mac(vsi, al->list[i].addr, true, false); + if (!f) { + if (i40e_is_vsi_in_vlan(vsi)) + f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, + true, false); + else + f = i40e_add_filter(vsi, al->list[i].addr, -1, + true, false); + } + + if (!f) { + dev_err(&pf->pdev->dev, + "Unable to add VF MAC filter\n"); + ret = I40E_ERR_PARAM; + goto error_param; + } + } + + /* program the updated filter list */ + if (i40e_sync_vsi_filters(vsi)) + dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + ret); +} + +/** + * i40e_vc_del_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * remove guest mac address filter + **/ +static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_ether_addr_list *al = + (struct i40e_virtchnl_ether_addr_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = al->vsi_id; + i40e_status ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < al->num_elements; i++) { + if (is_broadcast_ether_addr(al->list[i].addr) || + is_zero_ether_addr(al->list[i].addr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", + al->list[i].addr); + ret = I40E_ERR_INVALID_MAC_ADDR; + goto error_param; + } + } + vsi = pf->vsi[vf->lan_vsi_idx]; + + /* delete addresses from the list */ + for (i = 0; i < al->num_elements; i++) + i40e_del_filter(vsi, al->list[i].addr, + I40E_VLAN_ANY, true, false); + + /* program the updated filter list */ + if (i40e_sync_vsi_filters(vsi)) + dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + ret); +} + +/** + * i40e_vc_add_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * program guest vlan id + **/ +static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct i40e_virtchnl_vlan_filter_list *)msg; + struct 
i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vfl->vsi_id; + i40e_status aq_ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > I40E_MAX_VLANID) { + aq_ret = I40E_ERR_PARAM; + dev_err(&pf->pdev->dev, + "invalid VF VLAN id %d\n", vfl->vlan_id[i]); + goto error_param; + } + } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (vsi->info.pvid) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + i40e_vlan_stripping_enable(vsi); + for (i = 0; i < vfl->num_elements; i++) { + /* add new VLAN filter */ + int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (ret) + dev_err(&pf->pdev->dev, + "Unable to add VF vlan filter %d, error %d\n", + vfl->vlan_id[i], ret); + } + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); +} + +/** + * i40e_vc_remove_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * remove programmed guest vlan id + **/ +static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct i40e_virtchnl_vlan_filter_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vfl->vsi_id; + i40e_status aq_ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > I40E_MAX_VLANID) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (vsi->info.pvid) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (ret) + dev_err(&pf->pdev->dev, + "Unable to delete VF vlan filter %d, error %d\n", + vfl->vlan_id[i], ret); + } + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); +} + +/** + * i40e_vc_validate_vf_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * @msghndl: msg handle + * + * validate msg + **/ +static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len; + + /* Check if VF is disabled. */ + if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) + return I40E_ERR_PARAM; + + /* Validate message length. 
*/ + switch (v_opcode) { + case I40E_VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct i40e_virtchnl_version_info); + break; + case I40E_VIRTCHNL_OP_RESET_VF: + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + valid_len = 0; + break; + case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct i40e_virtchnl_txq_info); + break; + case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct i40e_virtchnl_rxq_info); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_vsi_queue_config_info *vqc = + (struct i40e_virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + i40e_virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct i40e_virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_irq_map_info *vimi = + (struct i40e_virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct i40e_virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct i40e_virtchnl_queue_select); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct i40e_virtchnl_ether_addr_list *veal = + (struct i40e_virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct i40e_virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + case I40E_VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct i40e_virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct i40e_virtchnl_promisc_info); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct i40e_virtchnl_queue_select); + break; + /* These are always errors coming from the VF. 
*/ + case I40E_VIRTCHNL_OP_EVENT: + case I40E_VIRTCHNL_OP_UNKNOWN: + default: + return -EPERM; + break; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) { + i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); + return -EINVAL; + } else { + return 0; + } +} + +/** + * i40e_vc_process_vf_msg + * @pf: pointer to the PF structure + * @vf_id: source VF id + * @msg: pointer to the msg buffer + * @msglen: msg length + * @msghndl: msg handle + * + * called from the common aeq/arq handler to + * process request from VF + **/ +int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + struct i40e_hw *hw = &pf->hw; + unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; + struct i40e_vf *vf; + int ret; + + pf->vf_aq_requests++; + if (local_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + vf = &(pf->vf[local_vf_id]); + /* perform basic checks on the msg */ + ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); + + if (ret) { + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", + local_vf_id, v_opcode, msglen); + return ret; + } + + switch (v_opcode) { + case I40E_VIRTCHNL_OP_VERSION: + ret = i40e_vc_get_version_msg(vf); + break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + ret = i40e_vc_get_vf_resources_msg(vf); + break; + case I40E_VIRTCHNL_OP_RESET_VF: + i40e_vc_reset_vf_msg(vf); + ret = 0; + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + ret = i40e_vc_config_queues_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + ret = i40e_vc_enable_queues_msg(vf, msg, msglen); + i40e_vc_notify_vf_link_state(vf); + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + ret = i40e_vc_disable_queues_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + ret = i40e_vc_add_vlan_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + ret = i40e_vc_get_stats_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_UNKNOWN: + default: + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", + v_opcode, local_vf_id); + ret = i40e_vc_send_resp_to_vf(vf, v_opcode, + I40E_ERR_NOT_IMPLEMENTED); + break; + } + + return ret; +} + +/** + * i40e_vc_process_vflr_event + * @pf: pointer to the PF structure + * + * called from the vlfr irq handler to + * free up VF resources and state variables + **/ +int i40e_vc_process_vflr_event(struct i40e_pf *pf) +{ + u32 reg, reg_idx, bit_idx, vf_id; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + + if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + return 0; + + /* re-enable vflr interrupt cause */ + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); + + clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + /* read GLGEN_VFLRSTAT register to find out the 
flr VFs */ + vf = &pf->vf[vf_id]; + reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); + if (reg & (1 << bit_idx)) { + /* clear the bit in GLGEN_VFLRSTAT */ + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + + if (!test_bit(__I40E_DOWN, &pf->state)) + i40e_reset_vf(vf, true); + } + } + + return 0; +} + +/** + * i40e_ndo_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: mac address + * + * program VF mac address + **/ +int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_mac_filter *f; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, + "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_param; + } + + vf = &(pf->vf[vf_id]); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, + "Uninitialized VF %d\n", vf_id); + ret = -EINVAL; + goto error_param; + } + + if (!is_valid_ether_addr(mac)) { + dev_err(&pf->pdev->dev, + "Invalid VF ethernet address\n"); + ret = -EINVAL; + goto error_param; + } + + /* delete the temporary mac address */ + i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, + true, false); + + /* Delete all the filters for this VSI - we're going to kill it + * anyway. + */ + list_for_each_entry(f, &vsi->mac_filter_list, list) + i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); + + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); + /* program mac filter */ + if (i40e_sync_vsi_filters(vsi)) { + dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); + ret = -EIO; + goto error_param; + } + ether_addr_copy(vf->default_lan_addr.addr, mac); + vf->pf_set_mac = true; + /* Force the VF driver stop so it has to reload with new MAC address */ + i40e_vc_disable_vf(pf, vf); + dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + +error_param: + return ret; +} + +/** + * i40e_ndo_set_vf_port_vlan + * @netdev: network interface device structure + * @vf_id: VF identifier + * @vlan_id: mac address + * @qos: priority setting + * + * program VF vlan id and/or qos + **/ +int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, + int vf_id, u16 vlan_id, u8 qos) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_pvid; + } + + if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { + dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + ret = -EINVAL; + goto error_pvid; + } + + vf = &(pf->vf[vf_id]); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); + ret = -EINVAL; + goto error_pvid; + } + + if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { + dev_err(&pf->pdev->dev, + "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", + vf_id); + /* Administrator Error - knock the VF offline until he does + * the right thing by reconfiguring his network correctly + * and then reloading the VF driver. 
+ */ + i40e_vc_disable_vf(pf, vf); + } + + /* Check for condition where there was already a port VLAN ID + * filter set and now it is being deleted by setting it to zero. + * Additionally check for the condition where there was a port + * VLAN but now there is a new and different port VLAN being set. + * Before deleting all the old VLAN filters we must add new ones + * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our + * MAC addresses deleted. + */ + if ((!(vlan_id || qos) || + (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && + vsi->info.pvid) + ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); + + if (vsi->info.pvid) { + /* kill old VLAN */ + ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & + VLAN_VID_MASK)); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "remove VLAN failed, ret=%d, aq_err=%d\n", + ret, pf->hw.aq.asq_last_status); + } + } + if (vlan_id || qos) + ret = i40e_vsi_add_pvid(vsi, + vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); + else + i40e_vsi_remove_pvid(vsi); + + if (vlan_id) { + dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", + vlan_id, qos, vf_id); + + /* add new VLAN filter */ + ret = i40e_vsi_add_vlan(vsi, vlan_id); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add VF VLAN failed, ret=%d aq_err=%d\n", ret, + vsi->back->hw.aq.asq_last_status); + goto error_pvid; + } + /* Kill non-vlan MAC filters - ignore error return since + * there might not be any non-vlan MAC filters. + */ + i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); + } + + if (ret) { + dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); + goto error_pvid; + } + /* The Port VLAN needs to be saved across resets the same as the + * default LAN MAC address. + */ + vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + ret = 0; + +error_pvid: + return ret; +} + +#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ +#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ +/** + * i40e_ndo_set_vf_bw + * @netdev: network interface device structure + * @vf_id: VF identifier + * @tx_rate: Tx rate + * + * configure VF Tx rate + **/ +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int speed = 0; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); + ret = -EINVAL; + goto error; + } + + if (min_tx_rate) { + dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + + vf = &(pf->vf[vf_id]); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); + ret = -EINVAL; + goto error; + } + + switch (pf->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + speed = 40000; + break; + case I40E_LINK_SPEED_10GB: + speed = 10000; + break; + case I40E_LINK_SPEED_1GB: + speed = 1000; + break; + default: + break; + } + + if (max_tx_rate > speed) { + dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", + max_tx_rate, vf->vf_id); + ret = -EINVAL; + goto error; + } + + if ((max_tx_rate < 50) && (max_tx_rate > 0)) { + dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = 50; + } + + /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ + ret = 
+	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
+			ret);
+		ret = -EIO;
+		goto error;
+	}
+	vf->tx_rate = max_tx_rate;
+error:
+	return ret;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	vf = &(pf->vf[vf_id]);
+	/* first vsi is always the LAN vsi */
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	ivi->vf = vf_id;
+
+	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
+
+	ivi->max_tx_rate = vf->tx_rate;
+	ivi->min_tx_rate = 0;
+	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+		   I40E_VLAN_PRIORITY_SHIFT;
+	if (vf->link_forced == false)
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->link_up == true)
+		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+	ivi->spoofchk = vf->spoofchk;
+	ret = 0;
+
+error_param:
+	return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_virtchnl_pf_event pfe;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	int abs_vf_id;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_out;
+	}
+
+	vf = &pf->vf[vf_id];
+	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+
+	switch (link) {
+	case IFLA_VF_LINK_STATE_AUTO:
+		vf->link_forced = false;
+		pfe.event_data.link_event.link_status =
+			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+		pfe.event_data.link_event.link_speed =
+			pf->hw.phy.link_info.link_speed;
+		break;
+	case IFLA_VF_LINK_STATE_ENABLE:
+		vf->link_forced = true;
+		vf->link_up = true;
+		pfe.event_data.link_event.link_status = true;
+		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		vf->link_forced = true;
+		vf->link_up = false;
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto error_out;
+	}
+	/* Notify the VF of its new link state */
+	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+			       0, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+	return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vsi_context ctxt;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vf = &(pf->vf[vf_id]);
+
+	if (enable == vf->spoofchk)
+		goto out;
+
+	vf->spoofchk = enable;
+	memset(&ctxt, 0, sizeof(ctxt));
+	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
+	ctxt.pf_num = pf->hw.pf_id;
+	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+	if (enable)
+		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+			ret);
+		ret = -EIO;
+	}
+out:
+	return ret;
+}
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 000000000..09043c1aa
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,130 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_MACVLAN_FILTERS 256
+#define I40E_MAX_VLAN_FILTERS 256
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
+
+#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_MASK 0xFFF
+#define I40E_PRIORITY_MASK 0x7000
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+	I40E_QUEUE_CTRL_UNKNOWN = 0,
+	I40E_QUEUE_CTRL_ENABLE,
+	I40E_QUEUE_CTRL_ENABLECHECK,
+	I40E_QUEUE_CTRL_DISABLE,
+	I40E_QUEUE_CTRL_DISABLECHECK,
+	I40E_QUEUE_CTRL_FASTDISABLE,
+	I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+	I40E_VF_STAT_INIT = 0,
+	I40E_VF_STAT_ACTIVE,
+	I40E_VF_STAT_FCOEENA,
+	I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+	I40E_VIRTCHNL_VF_CAP_L2,
+};
+
+/* VF information structure */
+struct i40e_vf {
+	struct i40e_pf *pf;
+
+	/* VF id in the PF space */
+	u16 vf_id;
+	/* all VF vsis connect to the same parent */
+	enum i40e_switch_element_types parent_type;
+
+	/* VF Port Extender (PE) stag if used */
+	u16 stag;
+
+	struct i40e_virtchnl_ether_addr default_lan_addr;
+	struct i40e_virtchnl_ether_addr default_fcoe_addr;
+	u16 port_vlan_id;
+	bool pf_set_mac;	/* The VMM admin set the VF MAC address */
+
+	/* VSI indices - actual VSI pointers are maintained in the PF structure
+	 * When assigned, these will be non-zero, because VSI 0 is always
+	 * the main LAN VSI for the PF.
+	 */
+	u8 lan_vsi_idx;		/* index into PF struct */
+	u8 lan_vsi_id;		/* ID as used by firmware */
+
+	u8 num_queue_pairs;	/* num of qps assigned to VF vsis */
+	u64 num_mdd_events;	/* num of mdd events detected */
+	u64 num_invalid_msgs;	/* num of malformed or invalid msgs detected */
+	u64 num_valid_msgs;	/* num of valid msgs detected */
+
+	unsigned long vf_caps;	/* vf's adv. capabilities */
+	unsigned long vf_states;	/* vf's runtime states */
+	unsigned int tx_rate;	/* Tx bandwidth limit in Mbps */
+	bool link_forced;
+	bool link_up;		/* only valid if VF link is forced */
+	bool spoofchk;
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+			   u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* VF configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+		       int max_tx_rate);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi);
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
--
cgit 1.2.3-korg
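
For reference, the sketch below is a small, self-contained C program (not part of the patch; the helper names pack_pvid and rate_to_credits are made up for illustration) showing the two pieces of arithmetic the handlers above rely on: i40e_ndo_set_vf_port_vlan() packs vlan_id and qos into the PVID as vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT), and i40e_ndo_set_vf_bw() converts a max Tx rate in Mbps into the 50 Mbps credits handed to i40e_aq_config_vsi_bw_limit().

/* Illustrative only -- mirrors I40E_VLAN_PRIORITY_SHIFT (12) and
 * I40E_BW_CREDIT_DIVISOR (50) from the patch; this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIORITY_SHIFT 12  /* mirrors I40E_VLAN_PRIORITY_SHIFT */
#define BW_CREDIT_DIVISOR   50  /* mirrors I40E_BW_CREDIT_DIVISOR, Mbps per credit */

/* Same encoding the driver programs into vsi->info.pvid */
static uint16_t pack_pvid(uint16_t vlan_id, uint8_t qos)
{
	return (uint16_t)(vlan_id | ((uint16_t)qos << VLAN_PRIORITY_SHIFT));
}

/* 0 leaves the limiter disabled; otherwise the firmware counts 50 Mbps credits */
static unsigned int rate_to_credits(unsigned int max_tx_rate_mbps)
{
	return max_tx_rate_mbps / BW_CREDIT_DIVISOR;
}

int main(void)
{
	/* VLAN 100 with QoS 3 -> 0x3064, decoded again by i40e_ndo_get_vf_config() */
	printf("pvid    = 0x%04x\n", pack_pvid(100, 3));
	/* 1000 Mbps -> 20 credits passed to i40e_aq_config_vsi_bw_limit() */
	printf("credits = %u\n", rate_to_credits(1000));
	return 0;
}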