From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as the OPNFV kvm base. It comes from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
branch linux-4.1.y-rt, and the base commit is:

commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior
Date:   Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior

Importing this way loses all the git history, which is not ideal; we
should move to a separate OPNFV project repository in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
 kernel/drivers/net/ethernet/intel/ixgbevf/Makefile |   38 +
 .../drivers/net/ethernet/intel/ixgbevf/defines.h   |  299 ++
 .../drivers/net/ethernet/intel/ixgbevf/ethtool.c   |  888 ++++
 .../drivers/net/ethernet/intel/ixgbevf/ixgbevf.h   |  504 +++
 .../net/ethernet/intel/ixgbevf/ixgbevf_main.c      | 4277 ++++++++++++++++++++
 kernel/drivers/net/ethernet/intel/ixgbevf/mbx.c    |  348 ++
 kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h    |  128 +
 kernel/drivers/net/ethernet/intel/ixgbevf/regs.h   |   84 +
 kernel/drivers/net/ethernet/intel/ixgbevf/vf.c     |  752 ++++
 kernel/drivers/net/ethernet/intel/ixgbevf/vf.h     |  215 +
 10 files changed, 7533 insertions(+)
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/Makefile
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/defines.h
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/mbx.c
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/regs.h
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/vf.c
 create mode 100644 kernel/drivers/net/ethernet/intel/ixgbevf/vf.h

diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/Makefile b/kernel/drivers/net/ethernet/intel/ixgbevf/Makefile
new file mode 100644
index 000000000..4ce4c97ef
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2012 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+                mbx.o \
+                ethtool.o \
+                ixgbevf_main.o
+
diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h b/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h
new file mode 100644
index 000000000..770e21a64
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -0,0 +1,299 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2015 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, see <http://www.gnu.org/licenses/>.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF		0x10ED
+#define IXGBE_DEV_ID_X540_VF		0x1515
+#define IXGBE_DEV_ID_X550_VF		0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF	0x15A8
+
+#define IXGBE_VF_IRQ_CLEAR_MASK		7
+#define IXGBE_VF_MAX_TX_QUEUES		8
+#define IXGBE_VF_MAX_RX_QUEUES		8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS	8
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL	0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL	0x0080
+#define IXGBE_LINK_SPEED_100_FULL	0x0008
+
+#define IXGBE_CTRL_RST		0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE	0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE	0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP		0x40000000
+#define IXGBE_LINKS_SPEED_82599		0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599	0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599	0x20000000
+#define IXGBE_LINKS_SPEED_100_82599	0x10000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE	8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE	8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY		1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL	0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN	0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS	0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE	0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME	0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK	0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN	0x00008000
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define
IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ +#define 
IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ 
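+
+/* Editorial illustration (not part of the original header): a transmit
+ * path ORs the DTYP and DCMD flags above into a descriptor's cmd_type_len
+ * field.  For the final fragment of a TSO packet this looks roughly like:
+ *
+ *	cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+ *		       IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_TSE |
+ *		       IXGBE_TXD_CMD_EOP | len;
+ *
+ * See ixgbevf_tx_map() in ixgbevf_main.c for the actual construction.
+ */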
+#define IXGBE_ADVTXD_STAT_DD	IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4	0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6	0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP	0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP	0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP	0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT	4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC		0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT	8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM	(IXGBE_TXD_POPTS_IXSM << \
+				 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM	(IXGBE_TXD_POPTS_TXSM << \
+				 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT	14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT	9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT		16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT		16 /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+#define IXGBE_EITR_CNT_WDIS	0x80000000
+#define IXGBE_MAX_EITR		0x00000FF8
+#define IXGBE_MIN_EITR		8
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR	-1
+#define IXGBE_ERR_RESET_FAILED		-2
+#define IXGBE_ERR_INVALID_ARGUMENT	-3
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE	0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH	0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT	16 /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c
new file mode 100644
index 000000000..b2f5b161d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -0,0 +1,888 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2015 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, see <http://www.gnu.org/licenses/>.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+struct ixgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	struct {
+		int sizeof_stat;
+		int stat_offset;
+		int base_stat_offset;
+		int saved_reset_offset;
+	};
+};
+
+#define IXGBEVF_STAT(m, b, r) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = -1, \
+	.saved_reset_offset = -1 \
+}
+
+static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
+	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
+				    stats.saved_reset_vfgprc)},
+	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
+				    stats.saved_reset_vfgptc)},
+	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
+				  stats.saved_reset_vfgorc)},
+	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
+				  stats.saved_reset_vfgotc)},
+	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
+	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
+				   stats.saved_reset_vfmprc)},
+	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+#ifdef BP_EXTENDED_STATS
+	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test (offline)",
+	"Link test (on/offline)"
+};
+
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+				struct ethtool_cmd *ecmd)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = 0;
+	bool link_up;
+
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_DUMMY1;
+	ecmd->port = -1;
+
+	hw->mac.get_link_status = 1;
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	if (link_up) {
+		__u32 speed = SPEED_10000;
+
+		switch (link_speed) {
+		case IXGBE_LINK_SPEED_10GB_FULL:
+			speed = SPEED_10000;
+			break;
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			speed = SPEED_1000;
+			break;
+		case IXGBE_LINK_SPEED_100_FULL:
+			speed = SPEED_100;
+			break;
+		}
+
+		ethtool_cmd_speed_set(ecmd, speed);
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+	struct
ixgbevf_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + +static int ixgbevf_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 45 + return IXGBE_REGS_LEN * sizeof(u32); +} + +static void ixgbevf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 regs_len = ixgbevf_get_regs_len(netdev); + u8 i; + + memset(p, 0, regs_len); + + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR + */ + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + + /* Receive DMA */ + for (i = 0; i < 2; i++) + regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); + for (i = 0; i < 2; i++) + regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); + for (i = 0; i < 2; i++) + regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); + + /* Receive */ + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); + + /* Transmit */ + for (i = 0; i < 2; i++) + regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); + for (i = 0; i < 2; i++) + regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); + for (i = 0; i < 2; i++) + regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); +} + +static void ixgbevf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbevf_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void 
ixgbevf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IXGBEVF_MAX_RXD; + ring->tx_max_pending = IXGBEVF_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int ixgbevf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; + u32 new_rx_count, new_tx_count; + int i, err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + return 0; + + while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (new_tx_count != adapter->tx_ring_count) { + tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring)); + if (!tx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + /* clone ring and setup updated count */ + tx_ring[i] = *adapter->tx_ring[i]; + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_tx_resources(&tx_ring[i]); + } + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; + } + } + } + + if (new_rx_count != adapter->rx_ring_count) { + rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring)); + if (!rx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + /* clone ring and setup updated count */ + rx_ring[i] = *adapter->rx_ring[i]; + rx_ring[i].count = new_rx_count; + err = ixgbevf_setup_rx_resources(&rx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_rx_resources(&rx_ring[i]); + } + + vfree(rx_ring); + rx_ring = NULL; + + goto clear_reset; + } + } + } + + /* bring interface down to prepare for update */ + ixgbevf_down(adapter); + + /* Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbevf_free_tx_resources(adapter->tx_ring[i]); + *adapter->tx_ring[i] = tx_ring[i]; + } + adapter->tx_ring_count = new_tx_count; + + vfree(tx_ring); + tx_ring = NULL; + } + + /* Rx */ + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbevf_free_rx_resources(adapter->rx_ring[i]); + *adapter->rx_ring[i] = rx_ring[i]; + } + adapter->rx_ring_count = new_rx_count; + + vfree(rx_ring); + rx_ring = NULL; + } + + /* restore interface using new values */ + ixgbevf_up(adapter); + +clear_reset: + /* free Tx resources if Rx error is encountered */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_free_tx_resources(&tx_ring[i]); + 
vfree(tx_ring);
+	}
+
+	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+	return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+	switch (stringset) {
+	case ETH_SS_TEST:
+		return IXGBE_TEST_LEN;
+	case ETH_SS_STATS:
+		return IXGBE_GLOBAL_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	char *base = (char *)adapter;
+	int i;
+#ifdef BP_EXTENDED_STATS
+	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_yields += adapter->rx_ring[i]->stats.yields;
+		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+		rx_missed += adapter->rx_ring[i]->stats.misses;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tx_yields += adapter->tx_ring[i]->stats.yields;
+		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+		tx_missed += adapter->tx_ring[i]->stats.misses;
+	}
+
+	adapter->bp_rx_yields = rx_yields;
+	adapter->bp_rx_cleaned = rx_cleaned;
+	adapter->bp_rx_missed = rx_missed;
+
+	adapter->bp_tx_yields = tx_yields;
+	adapter->bp_tx_cleaned = tx_cleaned;
+	adapter->bp_tx_missed = tx_missed;
+#endif
+
+	ixgbevf_update_stats(adapter);
+	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+			else
+				data[i] = *(u64 *)p;
+		} else {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+			else
+				data[i] = *(u32 *)p;
+		}
+	}
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+				u8 *data)
+{
+	char *p = (char *)data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *ixgbe_gstrings_test,
+		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	bool link_up;
+	u32 link_speed = 0;
+	*data = 0;
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+	if (!link_up)
+		*data = 1;
+
+	return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+	u16 reg;
+	u8 array_len;
+	u8 test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
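+ *
+ * (Editorial illustration, not original text: the first entry below,
+ * { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ * exercises VFRDBAL(0) and VFRDBAL(1), 0x40 bytes apart, writing each
+ * of the four patterns in register_test_patterns and reading back.)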
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default VF register test */ +static const struct ixgbevf_reg_test reg_test_vf[] = { + { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, + { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { .reg = 0 } +}; + +static const u32 register_test_patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF +}; + +static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 pat, val, before; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { + before = ixgbevf_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, + register_test_patterns[pat] & write); + val = ixgbevf_read_reg(&adapter->hw, reg); + if (val != (register_test_patterns[pat] & write & mask)) { + hw_dbg(&adapter->hw, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, + register_test_patterns[pat] & write & mask); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val, before; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = ixgbevf_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, write & mask); + val = ixgbevf_read_reg(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), write & mask); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + return false; +} + +static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + const struct ixgbevf_reg_test *test; + u32 i; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + dev_err(&adapter->pdev->dev, + "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } + test = reg_test_vf; + + /* Perform the register test, looping through the test table + * until we either fail or reach the null entry. 
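+ * (Editorial note: array-type tests step test->reg by 0x40 per element,
+ * while the TABLE32/TABLE64 cases step by 4 or 8 bytes, matching the
+ * switch below; a terminating entry with .reg == 0 ends the loop.)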
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + ixgbe_write_reg(&adapter->hw, + test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return 1; + } + test++; + } + + *data = 0; + return *data; +} + +static void ixgbevf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + dev_err(&adapter->pdev->dev, + "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__IXGBEVF_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + hw_dbg(&adapter->hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbevf_reset(adapter); + + hw_dbg(&adapter->hw, "register testing starting\n"); + if (ixgbevf_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbevf_reset(adapter); + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + hw_dbg(&adapter->hw, "online testing starting\n"); + /* Online tests */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int ixgbevf_nway_reset(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + + return 0; +} + +static int ixgbevf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ixgbevf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_q_vector *q_vector; + int num_vectors, i; + u16 tx_itr_param, 
rx_itr_param; + + /* don't accept Tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && + adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_10K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < num_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* Tx only */ + q_vector->itr = tx_itr_param; + else + /* Rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbevf_write_eitr(q_vector); + } + + return 0; +} + +static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_rx_queues; + return 0; + default: + hw_dbg(&adapter->hw, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_82599_RETA_SIZE; + + return 0; +} + +static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_RSS_HASH_KEY_SIZE; + + return 0; +} + +static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* If neither indirection table nor hash key was requested - just + * return a success avoiding taking any locks. 
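+ * (Editorial note: the ethtool core may pass a NULL indir or key pointer
+ * when the caller asked for only one of the two items.)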
+ */ + if (!indir && !key) + return 0; + + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + + return err; +} + +static const struct ethtool_ops ixgbevf_ethtool_ops = { + .get_settings = ixgbevf_get_settings, + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, + .self_test = ixgbevf_diag_test, + .get_sset_count = ixgbevf_get_sset_count, + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, + .get_rxnfc = ixgbevf_get_rxnfc, + .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, + .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, + .get_rxfh = ixgbevf_get_rxfh, +}; + +void ixgbevf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ixgbevf_ethtool_ops; +} diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h new file mode 100644 index 000000000..775d08900 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -0,0 +1,504 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_H_ +#define _IXGBEVF_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "vf.h" + +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct ixgbevf_tx_buffer { + union ixgbe_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ixgbevf_rx_buffer { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + +struct ixgbevf_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif +}; + +struct ixgbevf_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ixgbevf_rx_queue_stats { + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +enum ixgbevf_ring_state_t { + __IXGBEVF_TX_DETECT_HANG, + __IXGBEVF_HANG_CHECK_ARMED, +}; + +#define check_for_tx_hang(ring) \ + test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) + +struct ixgbevf_ring { + struct ixgbevf_ring *next; + struct net_device *netdev; + struct device *dev; + void *desc; /* descriptor ring memory */ + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + u16 count; /* amount of descriptors */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + union { + struct ixgbevf_tx_buffer *tx_buffer_info; + struct ixgbevf_rx_buffer *rx_buffer_info; + }; + unsigned long state; + struct ixgbevf_stats stats; + struct u64_stats_sync syncp; + union { + struct ixgbevf_tx_queue_stats tx_stats; + struct ixgbevf_rx_queue_stats rx_stats; + }; + + u64 hw_csum_rx_error; + u8 __iomem *tail; + struct sk_buff *skb; + + /* holds the special value that gets the hardware register offset + * associated with this ring, which is different for DCB and RSS modes + */ + u16 reg_idx; + int queue_index; /* needed for multiqueue queue management */ +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
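+ *
+ * (Editorial note: batching buffer refills into bundles of 16 means the
+ * ring tail register is written once per bundle rather than once per
+ * buffer, cutting MMIO overhead in the Rx cleanup path.)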
*/ +#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES +#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_RSS_HASH_KEY_SIZE 40 + +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 + +/* Supported Rx Buffer Sizes */ +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 + +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define IXGBE_TX_FLAGS_CSUM (u32)(1) +#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) +#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) +#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +struct ixgbevf_ring_container { + struct ixgbevf_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ixgbevf_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct ixgbevf_q_vector { + struct ixgbevf_adapter *adapter; + /* index of q_vector within array, also used for finding the bit in + * EICR and friends that represents the vector for this ring + */ + u16 v_idx; + u16 itr; /* Interrupt throttle rate written to EITR */ + struct napi_struct napi; + struct ixgbevf_ring_container rx, tx; + char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define IXGBEVF_QV_STATE_IDLE 0 +#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ +#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ + IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ + IXGBEVF_QV_STATE_POLL_YIELD) + spinlock_t lock; +#endif /* CONFIG_NET_RX_BUSY_POLL */ +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) +{ + spin_lock_init(&q_vector->lock); + q_vector->state = IXGBEVF_QV_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_LOCKED) { + WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); + q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->tx.ring->stats.yields++; +#endif + } else { + /* we don't care if someone yielded */ + q_vector->state = IXGBEVF_QV_STATE_NAPI; + } + 
spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | + IXGBEVF_QV_STATE_NAPI_YIELD)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* called from ixgbevf_low_latency_poll() */ +static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBEVF_QV_LOCKED)) { + q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->rx.ring->stats.yields++; +#endif + } else { + /* preserve yield marks */ + q_vector->state |= IXGBEVF_QV_STATE_POLL; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); + return q_vector->state & IXGBEVF_QV_USER_PEND; +} + +/* false if QV is currently owned */ +static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_OWNED) + rc = false; + q_vector->state |= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/* microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define IXGBE_MIN_RSC_ITR 24 +#define IXGBE_100K_ITR 40 +#define IXGBE_20K_ITR 200 +#define IXGBE_10K_ITR 400 +#define IXGBE_8K_ITR 500 + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the ixgbe hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) +{ + writel(value, ring->tail); +} + +#define IXGBEVF_RX_DESC(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBEVF_TX_DESC(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBEVF_TX_CTXTDESC(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_Q_VECTORS 2 + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* board specific private data structure */ +struct ixgbevf_adapter { + /* this field must be first, see ixgbevf_process_skb_fields */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + + /* Interrupt Throttle Rate */ + u16 rx_itr_setting; + u16 tx_itr_setting; + + /* interrupt masks */ + u32 eims_enable_mask; + u32 eims_other; + + /* TX */ + int num_tx_queues; + struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ + u64 restart_queue; + u32 tx_timeout_count; + + /* RX */ + int num_rx_queues; + struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + int num_msix_vectors; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; +#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1) +#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) + + struct msix_entry *msix_entries; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in ixgbe_vf.h */ + struct ixgbe_hw hw; + u16 msg_enable; + /* Interrupt Throttle Rate */ + u32 eitr_param; + + struct ixgbevf_hw_stats stats; + + unsigned long state; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + +#ifdef BP_EXTENDED_STATS + u64 bp_rx_yields; + u64 bp_rx_cleaned; + u64 bp_rx_missed; + + u64 bp_tx_yields; + u64 bp_tx_cleaned; + u64 bp_tx_missed; +#endif + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 link_speed; + bool link_up; + + struct timer_list service_timer; + struct work_struct service_task; + + spinlock_t mbx_lock; + unsigned long last_reset; +}; + +enum ixbgevf_state_t { + __IXGBEVF_TESTING, + __IXGBEVF_RESETTING, + __IXGBEVF_DOWN, + __IXGBEVF_DISABLED, + __IXGBEVF_REMOVING, + __IXGBEVF_SERVICE_SCHED, + __IXGBEVF_SERVICE_INITED, +}; + +enum ixgbevf_boards { + board_82599_vf, + board_X540_vf, + board_X550_vf, + board_X550EM_x_vf, +}; + +extern const struct ixgbevf_info ixgbevf_82599_vf_info; +extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; +extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; + +/* needed by ethtool.c */ +extern const char ixgbevf_driver_name[]; +extern const char ixgbevf_driver_version[]; + +void ixgbevf_up(struct ixgbevf_adapter *adapter); +void ixgbevf_down(struct ixgbevf_adapter *adapter); +void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); +void ixgbevf_reset(struct ixgbevf_adapter *adapter); +void ixgbevf_set_ethtool_ops(struct net_device *netdev); +int ixgbevf_setup_rx_resources(struct ixgbevf_ring *); +int ixgbevf_setup_tx_resources(struct ixgbevf_ring *); +void 
ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000..e71cdde9c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,4277 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2015 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, see <http://www.gnu.org/licenses/>.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+
+#include "ixgbevf.h"
+
+const char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
+
+#define DRV_VERSION "2.12.1-k"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] =
+	"Copyright (c) 2009 - 2012 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+	[board_82599_vf] = &ixgbevf_82599_vf_info,
+	[board_X540_vf]  = &ixgbevf_X540_vf_info,
+	[board_X550_vf]  = &ixgbevf_X550_vf_info,
+	[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ixgbevf_pci_tbl[] = {
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+	/* required last entry */
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
+{
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
+	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
+		schedule_work(&adapter->service_task);
+}
+
+static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
+{
+	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
+}
+
+/* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
+
+static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
+{
+	struct ixgbevf_adapter *adapter = hw->back;
+
+	if (!hw->hw_addr)
+		return;
+	hw->hw_addr = NULL;
+	dev_err(&adapter->pdev->dev, "Adapter removed\n");
+	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
+		ixgbevf_service_event_schedule(adapter);
+}
+
+static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+	u32 value;
+
+	/* The following check not only optimizes a bit by not
+	 * performing a read on the status register when the
+	 * register just read was a status register read that
+	 * returned IXGBE_FAILED_READ_REG.  It also blocks any
+	 * potential recursion.
+ */ + if (reg == IXGBE_VFSTATUS) { + ixgbevf_remove_adapter(hw); + return; + } + value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbevf_remove_adapter(hw); +} + +u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbevf_check_remove(hw, reg); + return value; +} + +/** + * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + **/ +static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + ivar &= ~0xFF; + ivar |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); + } else { + /* Tx or Rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); + } +} + +static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) +{ + return ring->stats.packets; +} + +static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); + struct ixgbe_hw *hw = &adapter->hw; + + u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); + u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) +{ + u32 tx_done = ixgbevf_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. 
+ */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED, + &tx_ring->state); + } + /* reset the countdown */ + clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); + + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + + return false; +} + +static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { + adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + ixgbevf_service_event_schedule(adapter); + } +} + +/** + * ixgbevf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbevf_tx_timeout(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ixgbevf_tx_timeout_reset(adapter); +} + +/** + * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: board private structure + * @tx_ring: tx ring to clean + **/ +static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *tx_ring) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = tx_ring->count / 2; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBEVF_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += 
total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+
+	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
+		struct ixgbe_hw *hw = &adapter->hw;
+		union ixgbe_adv_tx_desc *eop_desc;
+
+		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
+
+		pr_err("Detected Tx Unit Hang\n"
+		       "  Tx Queue             <%d>\n"
+		       "  TDH, TDT             <%x>, <%x>\n"
+		       "  next_to_use          <%x>\n"
+		       "  next_to_clean        <%x>\n"
+		       "tx_buffer_info[next_to_clean]\n"
+		       "  next_to_watch        <%p>\n"
+		       "  eop_desc->wb.status  <%x>\n"
+		       "  time_stamp           <%lx>\n"
+		       "  jiffies              <%lx>\n",
+		       tx_ring->queue_index,
+		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
+		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
+		       tx_ring->next_to_use, i,
+		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
+		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);
+
+		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+		/* schedule immediate reset if we believe we hung */
+		ixgbevf_tx_timeout_reset(adapter);
+
+		return true;
+	}
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
+	return !!budget;
+}
+
+/**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+			   struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	skb_mark_napi_id(skb, &q_vector->napi);
+
+	if (ixgbevf_qv_busy_polling(q_vector)) {
+		netif_receive_skb(skb);
+		/* exit early if we busy polled */
+		return;
+	}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+	napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
+				       union ixgbe_adv_rx_desc *rx_desc,
+				       struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* if IP and error */
+	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
+	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
+		ring->rx_stats.csum_err++;
+		return;
+	}
+
+	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
+		return;
+
+	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
+		ring->rx_stats.csum_err++;
+		return;
+	}
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the checksum, VLAN, protocol, and other fields within
+ * the skb.
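+ *
+ * For example, when the descriptor's VP bit is set, the VLAN tag is read
+ * from the descriptor write-back area and, if that VID is currently
+ * active on the interface, attached to the skb via
+ * __vlan_hwaccel_put_tag().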
+ **/ +static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ixgbevf_rx_checksum(rx_ring, rx_desc, skb); + + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + unsigned long *active_vlans = netdev_priv(rx_ring->netdev); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * ixgbevf_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); + + if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + return true; +} + +static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma = bi->dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on + * @cleaned_count: number of buffers to replace + **/ +static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, + u16 cleaned_count) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbevf_rx_buffer *bi; + unsigned int i = rx_ring->next_to_use; + + /* nothing to do or no valid netdev defined */ + if (!cleaned_count || !rx_ring->netdev) + return; + + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) + break; + + /* Refresh the desc even if pkt_addr didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->read.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ixgbevf_write_tail(rx_ring, i); + } +} + +/** + * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbevf specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + **/ +static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbevf_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbevf_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbevf_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *old_buff) +{ + struct ixgbevf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool ixgbevf_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +/** + * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ **/ +static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IXGBEVF_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as is */ + if (likely(!ixgbevf_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbevf_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + return false; + +#endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBEVF_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbevf_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; +} + +static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, + u32 qmask) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); +} + +static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = 
ixgbevf_desc_unused(rx_ring);
+	struct sk_buff *skb = rx_ring->skb;
+
+	while (likely(total_rx_packets < budget)) {
+		union ixgbe_adv_rx_desc *rx_desc;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * RXD_STAT_DD bit is set
+		 */
+		rmb();
+
+		/* retrieve a buffer from the ring */
+		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
+
+		cleaned_count++;
+
+		/* fetch next buffer in frame if non-eop */
+		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* Workaround hardware that can't do proper VEPA multicast
+		 * source pruning.
+		 */
+		if ((skb->pkt_type == PACKET_BROADCAST ||
+		     skb->pkt_type == PACKET_MULTICAST) &&
+		    ether_addr_equal(rx_ring->netdev->dev_addr,
+				     eth_hdr(skb)->h_source)) {
+			dev_kfree_skb_irq(skb);
+			continue;
+		}
+
+		/* populate checksum, VLAN, and protocol */
+		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
+
+		ixgbevf_rx_skb(q_vector, skb);
+
+		/* reset skb pointer */
+		skb = NULL;
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	/* place incomplete frames back on ring for completion */
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	return total_rx_packets;
+}
+
+/**
+ * ixgbevf_poll - NAPI polling callback
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean one or more rings associated with a
+ * q_vector.
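+ *
+ * Returns the full budget while any ring still has work pending so that
+ * NAPI keeps polling, or 0 once all rings are clean and the queue
+ * interrupts have been re-enabled.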
+ **/ +static int ixgbevf_poll(struct napi_struct *napi, int budget) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ixgbevf_for_each_ring(ring, q_vector->tx) + clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + +#ifdef CONFIG_NET_RX_BUSY_POLL + if (!ixgbevf_qv_lock_napi(q_vector)) + return budget; +#endif + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ixgbevf_for_each_ring(ring, q_vector->rx) + clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget) + < per_ring_budget); + +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_unlock_napi(q_vector); +#endif + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbevf_set_itr(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, + 1 << q_vector->v_idx); + + return 0; +} + +/** + * ixgbevf_write_eitr - write VTEITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + **/ +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; + + /* set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbevf_busy_poll_recv(struct napi_struct *napi) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int found = 0; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbevf_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbevf_for_each_ring(ring, q_vector->rx) { + found = ixgbevf_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbevf_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/** + * ixgbevf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_q_vector *q_vector; + int q_vectors, v_idx; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + adapter->eims_enable_mask = 0; + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. 
+ */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + struct ixgbevf_ring *ring; + + q_vector = adapter->q_vector[v_idx]; + + ixgbevf_for_each_ring(ring, q_vector->rx) + ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ixgbevf_for_each_ring(ring, q_vector->tx) + ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + if (q_vector->tx.ring && !q_vector->rx.ring) { + /* Tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_10K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* Rx or Rx/Tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= 1 << v_idx; + + ixgbevf_write_eitr(q_vector); + } + + ixgbevf_set_ivar(adapter, -1, 1, v_idx); + /* setup eims_other and add value to global eims_enable_mask */ + adapter->eims_other = 1 << v_idx; + adapter->eims_enable_mask |= adapter->eims_other; +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbevf_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttle rate management + * 0-20MB/s lowest (100000 ints/s) + * 20-100MB/s low (20000 ints/s) + * 100-1249MB/s bulk (8000 ints/s) + */ + /* what was last interrupt timeslice? 
*/ + timepassed_us = q_vector->itr >> 2; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > 20) + itr_setting = bulk_latency; + else if (bytes_perint <= 10) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= 20) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) +{ + u32 new_itr = q_vector->itr; + u8 current_itr; + + ixgbevf_update_itr(q_vector, &q_vector->tx); + ixgbevf_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IXGBE_100K_ITR; + break; + case low_latency: + new_itr = IXGBE_20K_ITR; + break; + case bulk_latency: + default: + new_itr = IXGBE_8K_ITR; + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + ixgbevf_write_eitr(q_vector); + } +} + +static irqreturn_t ixgbevf_msix_other(int irq, void *data) +{ + struct ixgbevf_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + + hw->mac.get_link_status = 1; + + ixgbevf_service_event_schedule(adapter); + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +/** + * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our q_vector struct for this interrupt vector + **/ +static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) +{ + struct ixgbevf_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, + int r_idx) +{ + struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; + + a->rx_ring[r_idx]->next = q_vector->rx.ring; + q_vector->rx.ring = a->rx_ring[r_idx]; + q_vector->rx.count++; +} + +static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, + int t_idx) +{ + struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; + + a->tx_ring[t_idx]->next = q_vector->tx.ring; + q_vector->tx.ring = a->tx_ring[t_idx]; + q_vector->tx.count++; +} + +/** + * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. 
+ **/ +static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int i, j; + int rqpv, tqpv; + int err = 0; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* The ideal configuration... + * We have enough vectors to map one per queue. + */ + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + map_vector_to_txq(adapter, v_start, txr_idx); + goto out; + } + + /* If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + */ + /* Re-adjusting *qpv takes care of the remainder. */ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + } + for (i = v_start; i < q_vectors; i++) { + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } + +out: + return err; +} + +/** + * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < q_vectors; vector++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + hw_dbg(&adapter->hw, + "request_irq failed for MSIX interrupt Error: %d\n", + err); + goto free_queue_irqs; + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + &ixgbevf_msix_other, 0, netdev->name, adapter); + if (err) { + hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", + err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + /* This failure is non-recoverable - it indicates the system is + * out of MSIX vector resources and the VF driver cannot run + * without them. Set the number of msix vectors to zero + * indicating that not enough can be allocated. The error + * will be returned to the user indicating device open failed. + * Any further attempts to force the driver to open will also + * fail. The only way to recover is to unload the driver and + * reload it again. 
If the system has recovered some MSIX + * vectors then it may succeed. + */ + adapter->num_msix_vectors = 0; + return err; +} + +static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) +{ + int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < q_vectors; i++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->rx.count = 0; + q_vector->tx.count = 0; + } +} + +/** + * ixgbevf_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) +{ + int err = 0; + + err = ixgbevf_request_msix_irqs(adapter); + + if (err) + hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) +{ + int i, q_vectors; + + q_vectors = adapter->num_msix_vectors; + i = q_vectors - 1; + + free_irq(adapter->msix_entries[i].vector, adapter); + i--; + + for (; i >= 0; i--) { + /* free only the irqs that were actually requested */ + if (!adapter->q_vector[i]->rx.ring && + !adapter->q_vector[i]->tx.ring) + continue; + + free_irq(adapter->msix_entries[i].vector, + adapter->q_vector[i]); + } + + ixgbevf_reset_q_vectors(adapter); +} + +/** + * ixgbevf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); + + IXGBE_WRITE_FLUSH(hw); + + for (i = 0; i < adapter->num_msix_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); +} + +/** + * ixgbevf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); +} + +/** + * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = IXGBE_TXDCTL_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+	IXGBE_WRITE_FLUSH(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+	/* disable head writeback */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	/* In order to avoid issues, WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on chip descriptors, which is
+	 * currently 40.
+	 */
+	txdctl |= (8 << 16);    /* WTHRESH = 8 */
+
+	/* Setting PTHRESH to 32 improves performance */
+	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+		   32;          /* PTHRESH = 32 */
+
+	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		usleep_range(1000, 2000);
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+	if (!wait_loop)
+		pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
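+ *
+ * Simply walks every active Tx queue and programs it via
+ * ixgbevf_configure_tx_ring() above.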
+ **/ +static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) +{ + u32 i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 srrctl; + + srrctl = IXGBE_SRRCTL_DROP_EN; + + srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); +} + +static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* PSRTYPE must be initialized in 82599 */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | + IXGBE_PSRTYPE_L2HDR; + + if (adapter->num_rx_queues > 1) + psrtype |= 1 << 29; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); +} + +#define IXGBEVF_MAX_RX_DESC_POLL 10 +static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (IXGBE_REMOVED(hw->hw_addr)) + return; + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); + + /* the hardware may take up to 100us to really disable the Rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", + reg_idx); +} + +static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (IXGBE_REMOVED(hw->hw_addr)) + return; + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + pr_err("RXDCTL.ENABLE queue %d not set while polling\n", + reg_idx); +} + +static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfmrqc = 0, vfreta = 0; + u32 rss_key[10]; + u16 rss_i = adapter->num_rx_queues; + int i, j; + + /* Fill out hash function seeds */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); + + /* Fill out redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + vfreta = (vfreta << 8) | (j * 0x1); + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); + } + + /* Perform hash on these packet types */ + vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 | + IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP | + IXGBE_VFMRQC_RSS_FIELD_IPV6 | + IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP; + + vfmrqc |= IXGBE_VFMRQC_RSSEN; + + IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); +} + +static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + 
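/* The bring-up below mirrors ixgbevf_configure_tx_ring(): quiesce the
+	 * queue, program the descriptor base and length, reset the head/tail
+	 * pointers and the software indices, then re-enable the queue and
+	 * refill it with receive buffers.
+	 */
+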
/* disable queue to avoid issues while updating state */
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	ixgbevf_disable_rx_queue(adapter, ring);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+	ring->next_to_alloc = 0;
+
+	ixgbevf_configure_srrctl(adapter, reg_idx);
+
+	/* allow any size packet since we can handle overflow */
+	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+
+	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	ixgbevf_rx_desc_queue_enable(adapter, ring);
+	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	ixgbevf_setup_psrtype(adapter);
+	if (hw->mac.type >= ixgbe_mac_X550_vf)
+		ixgbevf_setup_vfmrqc(adapter);
+
+	/* notify the PF of our intent to use this size of frame */
+	ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
+				   __be16 proto, u16 vid)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* add VID to filter table */
+	err = hw->mac.ops.set_vfta(hw, vid, 0, true);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	/* translate error return types so error makes sense */
+	if (err == IXGBE_ERR_MBX)
+		return -EIO;
+
+	if (err == IXGBE_ERR_INVALID_ARGUMENT)
+		return -EACCES;
+
+	set_bit(vid, adapter->active_vlans);
+
+	return err;
+}
+
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
+				    __be16 proto, u16 vid)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err = -EOPNOTSUPP;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* remove VID from filter table */
+	err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	clear_bit(vid, adapter->active_vlans);
+
+	return err;
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+	u16 vid;
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		ixgbevf_vlan_rx_add_vid(adapter->netdev,
+					htons(ETH_P_8021Q), vid);
+}
+
+static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int count = 0;
+
+	if ((netdev_uc_count(netdev)) > 10) {
+		pr_err("Too many unicast filters - No Space\n");
+		return -ENOSPC;
+	}
+
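+	/* Filters are pushed to the PF one mailbox message at a time,
+	 * starting at index 1; writing index 0 with a NULL address (the
+	 * else branch below) asks the PF to flush every filter for this VF.
+	 */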
+ if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); + udelay(200); + } + } else { + /* If the list is empty then send message to PF driver to + * clear all MAC VLANs on this VF. + */ + hw->mac.ops.set_uc_addr(hw, 0, NULL); + } + + return count; +} + +/** + * ixgbevf_set_rx_mode - Multicast and unicast set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the multicast address + * list, unicast address list or the network interface flags are updated. + * This routine is responsible for configuring the hardware for proper + * multicast mode and configuring requested unicast filters. + **/ +static void ixgbevf_set_rx_mode(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + spin_lock_bh(&adapter->mbx_lock); + + /* reprogram multicast list */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + + ixgbevf_write_uc_addr_list(netdev); + + spin_unlock_bh(&adapter->mbx_lock); +} + +static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) +{ + int q_idx; + struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) +{ + int q_idx; + struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef CONFIG_NET_RX_BUSY_POLL + while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ + } +} + +static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int def_q = 0; + unsigned int num_tcs = 0; + unsigned int num_rx_queues = adapter->num_rx_queues; + unsigned int num_tx_queues = adapter->num_tx_queues; + int err; + + spin_lock_bh(&adapter->mbx_lock); + + /* fetch queue configuration from the PF */ + err = ixgbevf_get_queues(hw, &num_tcs, &def_q); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return err; + + if (num_tcs > 1) { + /* we need only one Tx queue */ + num_tx_queues = 1; + + /* update default Tx ring register index */ + adapter->tx_ring[0]->reg_idx = def_q; + + /* we need as many queues as traffic classes */ + num_rx_queues = num_tcs; + } + + /* if we have a bad config abort request queue reset */ + if ((adapter->num_rx_queues != num_rx_queues) || + (adapter->num_tx_queues != num_tx_queues)) { + /* force mailbox timeout to prevent further messages */ + hw->mbx.timeout = 0; + + /* wait for watchdog to come around and bail us out */ + adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; + } + + return 0; +} + +static void ixgbevf_configure(struct ixgbevf_adapter *adapter) +{ + ixgbevf_configure_dcb(adapter); + + ixgbevf_set_rx_mode(adapter->netdev); + + ixgbevf_restore_vlan(adapter); + + ixgbevf_configure_tx(adapter); + ixgbevf_configure_rx(adapter); +} + +static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) +{ + /* Only save pre-reset stats if there are some */ + if (adapter->stats.vfgprc || 
adapter->stats.vfgptc) { + adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - + adapter->stats.base_vfgprc; + adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - + adapter->stats.base_vfgptc; + adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - + adapter->stats.base_vfgorc; + adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - + adapter->stats.base_vfgotc; + adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - + adapter->stats.base_vfmprc; + } +} + +static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); + adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); + adapter->stats.last_vfgorc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); + adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); + adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); + adapter->stats.last_vfgotc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); + adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); + + adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; + adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; + adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; + adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; + adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; +} + +static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int api[] = { ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown }; + int err = 0, idx = 0; + + spin_lock_bh(&adapter->mbx_lock); + + while (api[idx] != ixgbe_mbox_api_unknown) { + err = ixgbevf_negotiate_api_version(hw, api[idx]); + if (!err) + break; + idx++; + } + + spin_unlock_bh(&adapter->mbx_lock); +} + +static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + + ixgbevf_configure_msix(adapter); + + spin_lock_bh(&adapter->mbx_lock); + + if (is_valid_ether_addr(hw->mac.addr)) + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + else + hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); + + spin_unlock_bh(&adapter->mbx_lock); + + smp_mb__before_atomic(); + clear_bit(__IXGBEVF_DOWN, &adapter->state); + ixgbevf_napi_enable_all(adapter); + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_VTEICR); + ixgbevf_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(netdev); + + ixgbevf_save_reset_stats(adapter); + ixgbevf_init_last_counter_stats(adapter); + + hw->mac.get_link_status = 1; + mod_timer(&adapter->service_timer, jiffies); +} + +void ixgbevf_up(struct ixgbevf_adapter *adapter) +{ + ixgbevf_configure(adapter); + + ixgbevf_up_complete(adapter); +} + +/** + * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + unsigned int i; + + /* Free Rx ring sk_buff */ + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring pages */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbevf_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) + dma_unmap_page(dev, 
rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + __free_page(rx_buffer->page); + rx_buffer->page = NULL; + } + + size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); +} + +/** + * ixgbevf_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) +{ + struct ixgbevf_tx_buffer *tx_buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_clean_tx_ring(adapter->tx_ring[i]); +} + +void ixgbevf_down(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* disable all enabled Rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ixgbevf_irq_disable(adapter); + + ixgbevf_napi_disable_all(adapter); + + del_timer_sync(&adapter->service_timer); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), + IXGBE_TXDCTL_SWFLSH); + } + + if (!pci_channel_offline(adapter->pdev)) + ixgbevf_reset(adapter); + + ixgbevf_clean_all_tx_rings(adapter); + ixgbevf_clean_all_rx_rings(adapter); +} + +void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) +{ + WARN_ON(in_interrupt()); + + while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) + msleep(1); + + ixgbevf_down(adapter); + ixgbevf_up(adapter); + + clear_bit(__IXGBEVF_RESETTING, &adapter->state); +} + +void ixgbevf_reset(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + if (hw->mac.ops.reset_hw(hw)) { + hw_dbg(hw, "PF still resetting\n"); + } else { + hw->mac.ops.init_hw(hw); + ixgbevf_negotiate_api(adapter); + } + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } + + adapter->last_reset = jiffies; +} + 
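+/* Illustrative sketch, not part of the driver: ixgbevf_acquire_msix_vectors()
+ * below relies on the pci_enable_msix_range() contract of returning either a
+ * vector count in [minvec, maxvec] or a negative errno, so a partial grant
+ * still counts as success. With hypothetical numbers:
+ *
+ *    int got = pci_enable_msix_range(pdev, entries, 2, 5);
+ *
+ *    if (got < 0)
+ *        return got;    // not even 2 vectors were available
+ *    // otherwise got lies anywhere in [2, 5]; size the queues to match
+ */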
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, + int vectors) +{ + int vector_threshold; + + /* We'll want at least 2 (vector_threshold): + * 1) TxQ[0] + RxQ[0] handler + * 2) Other (Link Status Change, etc.) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + + if (vectors < 0) { + dev_err(&adapter->pdev->dev, + "Unable to allocate MSI-X interrupts\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return vectors; + } + + /* Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = vectors; + + return 0; +} + +/** + * ixgbevf_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int def_q = 0; + unsigned int num_tcs = 0; + int err; + + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + spin_lock_bh(&adapter->mbx_lock); + + /* fetch queue configuration from the PF */ + err = ixgbevf_get_queues(hw, &num_tcs, &def_q); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return; + + /* we need as many queues as traffic classes */ + if (num_tcs > 1) { + adapter->num_rx_queues = num_tcs; + } else { + u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES); + + switch (hw->api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + adapter->num_rx_queues = rss; + adapter->num_tx_queues = rss; + default: + break; + } + } +} + +/** + * ixgbevf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
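+ *
+ * (For illustration: with two Tx and two Rx queues, the loops below
+ * leave tx_ring[0..1] and rx_ring[0..1] pointing at separately
+ * kzalloc'd rings, each with queue_index equal to reg_idx; on any
+ * allocation failure every ring allocated so far is freed again and
+ * -ENOMEM is returned.)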
+ **/ +static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ring *ring; + int rx = 0, tx = 0; + + for (; tx < adapter->num_tx_queues; tx++) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->count = adapter->tx_ring_count; + ring->queue_index = tx; + ring->reg_idx = tx; + + adapter->tx_ring[tx] = ring; + } + + for (; rx < adapter->num_rx_queues; rx++) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + ring->count = adapter->rx_ring_count; + ring->queue_index = rx; + ring->reg_idx = rx; + + adapter->rx_ring[rx] = ring; + } + + return 0; + +err_allocation: + while (tx) { + kfree(adapter->tx_ring[--tx]); + adapter->tx_ring[tx] = NULL; + } + + while (rx) { + kfree(adapter->rx_ring[--rx]); + adapter->rx_ring[rx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + int vector, v_budget; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * The default is to use pairs of vectors. + */ + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. + */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + err = ixgbevf_acquire_msix_vectors(adapter, v_budget); + if (err) + goto out; + + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto out; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + +out: + return err; +} + +/** + * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
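+ *
+ * (Illustration, assuming NON_Q_VECTORS is 1: if three MSI-X vectors
+ * were granted, the loop below creates two queue vectors, the remaining
+ * vector staying reserved for mailbox/link events, and registers each
+ * one's NAPI context with a poll weight of 64 packets per invocation.)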
+ **/ +static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct ixgbevf_q_vector *q_vector; + + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->v_idx = q_idx; + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbevf_poll, 64); +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) +{ + int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; + + adapter->q_vector[q_idx] = NULL; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +/** + * ixgbevf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + ixgbevf_set_num_queues(adapter); + + err = ixgbevf_set_interrupt_capability(adapter); + if (err) { + hw_dbg(&adapter->hw, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ixgbevf_alloc_q_vectors(adapter); + if (err) { + hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + err = ixgbevf_alloc_queues(adapter); + if (err) { + pr_err("Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBEVF_DOWN, &adapter->state); + + return 0; +err_alloc_queues: + ixgbevf_free_q_vectors(adapter); +err_alloc_q_vectors: + ixgbevf_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + kfree(adapter->tx_ring[i]); + adapter->tx_ring[i] = NULL; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + kfree(adapter->rx_ring[i]); + adapter->rx_ring[i] = NULL; + } + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbevf_free_q_vectors(adapter); + ixgbevf_reset_interrupt_capability(adapter); +} + +/** + * ixgbevf_sw_init - Initialize general software structures + * @adapter: board private structure to initialize + * + * ixgbevf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int err; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->mbx.ops.init_params(hw); + + /* assume legacy case in which PF would only give VF 2 queues */ + hw->mac.max_tx_queues = 2; + hw->mac.max_rx_queues = 2; + + /* lock to protect mailbox accesses */ + spin_lock_init(&adapter->mbx_lock); + + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state. 
Is the PF interface up?\n"); + } else { + err = hw->mac.ops.init_hw(hw); + if (err) { + pr_err("init_shared_code failed: %d\n", err); + goto out; + } + ixgbevf_negotiate_api(adapter); + err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + if (err) + dev_info(&pdev->dev, "Error reading MAC address\n"); + else if (is_zero_ether_addr(adapter->hw.mac.addr)) + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_info(&pdev->dev, "Assigning random MAC address\n"); + eth_hw_addr_random(netdev); + memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); + } + + /* Enable dynamic interrupt throttling rates */ + adapter->rx_itr_setting = 1; + adapter->tx_itr_setting = 1; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; + adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; + + set_bit(__IXGBEVF_DOWN, &adapter->state); + return 0; + +out: + return err; +} + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = IXGBE_READ_REG(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ + u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } +/** + * ixgbevf_update_stats - Update the board statistics counters. 
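+ *
+ * (The UPDATE_VF_COUNTER_* macros above extend the hardware's 32- and
+ * 36-bit rollover counters into monotonic 64-bit software totals. As a
+ * worked example, a 32-bit register that last read 0xFFFFFFF0 and now
+ * reads 0x00000010 has wrapped, so the macro credits the running total
+ * with an extra 2^32 before splicing in the new low-order bits.)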
+ * @adapter: board private structure + **/ +void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, + adapter->stats.vfgprc); + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, + adapter->stats.vfgptc); + UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + adapter->stats.last_vfgorc, + adapter->stats.vfgorc); + UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + adapter->stats.last_vfgotc, + adapter->stats.vfgotc); + UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, + adapter->stats.vfmprc); + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->hw_csum_rx_error += + adapter->rx_ring[i]->hw_csum_rx_error; + adapter->rx_ring[i]->hw_csum_rx_error = 0; + } +} + +/** + * ixgbevf_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbevf_service_timer(unsigned long data) +{ + struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); + + ixgbevf_service_event_schedule(adapter); +} + +static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) +{ + if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED)) + return; + + adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + adapter->tx_timeout_count++; + + ixgbevf_reinit_locked(adapter); +} + +/** + * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
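+ *
+ * (Illustration: if q_vectors 0 and 2 each own at least one ring, the
+ * loop below builds eics = 0b101; writing that mask to VTEICS raises a
+ * software interrupt on exactly those vectors, so their rings are
+ * serviced even if a hardware interrupt was missed.)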
+ **/ +static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eics = 0; + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + /* get one bit for every active Tx/Rx interrupt vector */ + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + struct ixgbevf_q_vector *qv = adapter->q_vector[i]; + + if (qv->rx.ring || qv->tx.ring) + eics |= 1 << i; + } + + /* Cause software interrupt to ensure rings are cleaned */ + IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); +} + +/** + * ixgbevf_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + **/ +static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + s32 err; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + spin_unlock_bh(&adapter->mbx_lock); + + /* if check for link returns error we will need to reset */ + if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { + adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + link_up = false; + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * ixgbevf_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", + (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? + "10 Gbps" : + (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? + "1 Gbps" : + (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? 
+ "100 Mbps" : + "unknown speed"); + + netif_carrier_on(netdev); +} + +/** + * ixgbevf_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); + + netif_carrier_off(netdev); +} + +/** + * ixgbevf_watchdog_subtask - worker thread to bring link up + * @work: pointer to work_struct containing our data + **/ +static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) +{ + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + ixgbevf_watchdog_update_link(adapter); + + if (adapter->link_up) + ixgbevf_watchdog_link_is_up(adapter); + else + ixgbevf_watchdog_link_is_down(adapter); + + ixgbevf_update_stats(adapter); +} + +/** + * ixgbevf_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbevf_service_task(struct work_struct *work) +{ + struct ixgbevf_adapter *adapter = container_of(work, + struct ixgbevf_adapter, + service_task); + struct ixgbe_hw *hw = &adapter->hw; + + if (IXGBE_REMOVED(hw->hw_addr)) { + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbevf_down(adapter); + rtnl_unlock(); + } + return; + } + + ixgbevf_queue_reset_subtask(adapter); + ixgbevf_reset_subtask(adapter); + ixgbevf_watchdog_subtask(adapter); + ixgbevf_check_hang_subtask(adapter); + + ixgbevf_service_event_complete(adapter); +} + +/** + * ixgbevf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) +{ + ixgbevf_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + ixgbevf_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: Tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) +{ + int size; + + size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + hw_dbg(&adapter->hw, "Unable to allocate memory for 
the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
+{
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ return 0;
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
+{
+ ixgbevf_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i]->desc)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbevf_open(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* A previous failure to open the device because of a lack of + * available MSIX vector resources may have reset the number + * of msix vectors variable to zero. The only way to recover + * is to unload/reload the driver and hope that the system has + * been able to recover some MSIX vector resources. + */ + if (!adapter->num_msix_vectors) + return -ENOMEM; + + if (hw->adapter_stopped) { + ixgbevf_reset(adapter); + /* if adapter is still stopped then PF isn't up and + * the VF can't start. + */ + if (hw->adapter_stopped) { + err = IXGBE_ERR_MBX; + pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); + goto err_setup_reset; + } + } + + /* disallow open during test */ + if (test_bit(__IXGBEVF_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ixgbevf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbevf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbevf_configure(adapter); + + /* Map the Tx/Rx rings to the vectors we were allotted. + * if request_irq will be called in this function map_rings + * must be called *before* up_complete + */ + ixgbevf_map_rings_to_vectors(adapter); + + err = ixgbevf_request_irq(adapter); + if (err) + goto err_req_irq; + + ixgbevf_up_complete(adapter); + + return 0; + +err_req_irq: + ixgbevf_down(adapter); +err_setup_rx: + ixgbevf_free_all_rx_resources(adapter); +err_setup_tx: + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_reset(adapter); + +err_setup_reset: + + return err; +} + +/** + * ixgbevf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int ixgbevf_close(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); + + return 0; +} + +static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + + if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) + return; + + adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
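+ *
+ * The sequence below therefore mirrors an administrative down/up:
+ * close the interface if it is running, rebuild the interrupt scheme
+ * so queue counts are renegotiated with the PF, then open it again.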
+ */ + if (netif_running(dev)) + ixgbevf_close(dev); + + ixgbevf_clear_interrupt_scheme(adapter); + ixgbevf_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + ixgbevf_open(dev); +} + +static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; + } + + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + *hdr_len = skb_transport_offset(skb) + l4len; + + /* update GSO size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 1 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4_hdr = 0; + + switch (first->protocol) { + case htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + first->protocol); + } + break; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + 
type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ }
+
+ /* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+}
+
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
+
+ /* set HW VLAN bit if VLAN is present */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+
+ /* set segmentation enable bits for TSO/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+
+ return cmd_type;
+}
+
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+
+ /* use index 1 context for TSO/FSO/FCOE */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
+ /* Check Context must be set if Tx switch is enabled, which it
+ * always is in the case where virtual functions are running
+ */
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+
+ tx_desc->read.olinfo_status = olinfo_status;
+}
+
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ const u8 hdr_len)
+{
+ dma_addr_t dma;
+ struct sk_buff *skb = first->skb;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ u32 tx_flags = first->tx_flags;
+ __le32 cmd_type;
+ u16 i = tx_ring->next_to_use;
+
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+ cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ for (;;) {
+ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = 
cpu_to_le64(dma); + tx_desc->read.olinfo_status = 0; + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.olinfo_status = 0; + + frag++; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); + tx_desc->read.cmd_type_len = cmd_type; + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier (wmb) to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* notify HW of packet */ + ixgbevf_write_tail(tx_ring, i); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbevf_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + + return 0; +} + +static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) +{ + if (likely(ixgbevf_desc_unused(tx_ring) >= size)) + return 0; + return __ixgbevf_maybe_stop_tx(tx_ring, size); +} + +static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_tx_buffer *first; + struct ixgbevf_ring *tx_ring; + int tso; + u32 tx_flags = 0; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + unsigned short f; +#endif + u8 hdr_len = 0; + u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); + + if (!dst_mac || is_link_local_ether_addr(dst_mac)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_ring = adapter->tx_ring[skb->queue_mapping]; + + /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb); + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = vlan_get_protocol(skb); + + tso = ixgbevf_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + ixgbevf_tx_csum(tx_ring, first); + + ixgbevf_tx_map(tx_ring, first, hdr_len); + + ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +/** + * ixgbevf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbevf_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + spin_lock_bh(&adapter->mbx_lock); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + + spin_unlock_bh(&adapter->mbx_lock); + + return 0; +} + +/** + * ixgbevf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int max_possible_frame = 
MAXIMUM_ETHERNET_VLAN_SIZE; + + switch (adapter->hw.api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; + break; + default: + if (adapter->hw.mac.type != ixgbe_mac_82599_vf) + max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; + break; + } + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > max_possible_frame)) + return -EINVAL; + + hw_dbg(hw, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbevf_netpoll(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return; + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + rtnl_lock(); + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); + rtnl_unlock(); + } + + ixgbevf_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbevf_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_restore_state(pdev); + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
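+ * Without the re-save, a later pci_restore_state() would find
+ * state_saved false and skip restoring config space.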
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__IXGBEVF_DISABLED, &adapter->state); + pci_set_master(pdev); + + ixgbevf_reset(adapter); + + rtnl_lock(); + err = ixgbevf_init_interrupt_scheme(adapter); + rtnl_unlock(); + if (err) { + dev_err(&pdev->dev, "Cannot initialize interrupts\n"); + return err; + } + + if (netif_running(netdev)) { + err = ixgbevf_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} + +#endif /* CONFIG_PM */ +static void ixgbevf_shutdown(struct pci_dev *pdev) +{ + ixgbevf_suspend(pdev, PMSG_SUSPEND); +} + +static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + unsigned int start; + u64 bytes, packets; + const struct ixgbevf_ring *ring; + int i; + + ixgbevf_update_stats(adapter); + + stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_bytes += bytes; + stats->rx_packets += packets; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + } + + return stats; +} + +static const struct net_device_ops ixgbevf_netdev_ops = { + .ndo_open = ixgbevf_open, + .ndo_stop = ixgbevf_close, + .ndo_start_xmit = ixgbevf_xmit_frame, + .ndo_set_rx_mode = ixgbevf_set_rx_mode, + .ndo_get_stats64 = ixgbevf_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbevf_set_mac, + .ndo_change_mtu = ixgbevf_change_mtu, + .ndo_tx_timeout = ixgbevf_tx_timeout, + .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = ixgbevf_busy_poll_recv, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbevf_netpoll, +#endif +}; + +static void ixgbevf_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &ixgbevf_netdev_ops; + ixgbevf_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * ixgbevf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbevf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbevf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
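+ *
+ * (Note: ent->driver_data indexes ixgbevf_info_tbl, so the matched PCI
+ * device ID selects its board-specific mac_ops before ixgbevf_sw_init()
+ * runs.)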
+ **/ +static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbevf_adapter *adapter = NULL; + struct ixgbe_hw *hw = NULL; + const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; + int err, pci_using_dac; + bool disable_dev = false; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + err = pci_request_regions(pdev, ixgbevf_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), + MAX_TX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + /* call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + ixgbevf_assign_netdev_ops(netdev); + + /* Setup HW API */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = ii->mac; + + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, + sizeof(struct ixgbe_mbx_operations)); + + /* setup the private structure */ + err = ixgbevf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* The HW MAC address was set and/or determined in sw_init */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + pr_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + netdev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (IXGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + + setup_timer(&adapter->service_timer, &ixgbevf_service_timer, + (unsigned long)adapter); + + INIT_WORK(&adapter->service_task, ixgbevf_service_task); + set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); + clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); + + err = ixgbevf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + strcpy(netdev->name, "eth%d"); + + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, netdev); + netif_carrier_off(netdev); + + ixgbevf_init_last_counter_stats(adapter); + + /* print the VF info */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); + + switch (hw->mac.type) { + case ixgbe_mac_X550_vf: + dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); + break; + case ixgbe_mac_X540_vf: + dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + break; 
+ case ixgbe_mac_82599_vf:
+ default:
+ dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
+ break;
+ }
+
+ return 0;
+
+err_register:
+ ixgbevf_clear_interrupt_scheme(adapter);
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(adapter->io_addr);
+err_ioremap:
+ disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter;
+ bool disable_dev;
+
+ if (!netdev)
+ return;
+
+ adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_REMOVING, &adapter->state);
+ cancel_work_sync(&adapter->service_task);
+
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->io_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
+ free_netdev(netdev);
+
+ if (disable_dev)
+ pci_disable_device(pdev);
+}
+
+/**
+ * ixgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ **/
+static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the ixgbevf_resume routine.
+ **/
+static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ smp_mb__before_atomic();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
+ pci_set_master(pdev);
+
+ ixgbevf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. 
Implementation resembles the + * second-half of the ixgbevf_resume routine. + **/ +static void ixgbevf_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbevf_up(adapter); + + netif_device_attach(netdev); +} + +/* PCI Error Recovery (ERS) */ +static const struct pci_error_handlers ixgbevf_err_handler = { + .error_detected = ixgbevf_io_error_detected, + .slot_reset = ixgbevf_io_slot_reset, + .resume = ixgbevf_io_resume, +}; + +static struct pci_driver ixgbevf_driver = { + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = ixgbevf_remove, +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, +#endif + .shutdown = ixgbevf_shutdown, + .err_handler = &ixgbevf_err_handler +}; + +/** + * ixgbevf_init_module - Driver Registration Routine + * + * ixgbevf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbevf_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", ixgbevf_driver_string, + ixgbevf_driver_version); + + pr_info("%s\n", ixgbevf_copyright); + + ret = pci_register_driver(&ixgbevf_driver); + return ret; +} + +module_init(ixgbevf_init_module); + +/** + * ixgbevf_exit_module - Driver Exit Cleanup Routine + * + * ixgbevf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbevf_exit_module(void) +{ + pci_unregister_driver(&ixgbevf_driver); +} + +#ifdef DEBUG +/** + * ixgbevf_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = hw->back; + + return adapter->netdev->name; +} + +#endif +module_exit(ixgbevf_exit_module); + +/* ixgbevf_main.c */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.c b/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.c new file mode 100644 index 000000000..dc68fea48 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -0,0 +1,348 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" +#include "ixgbevf.h" + +/** + * ixgbevf_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns 0 if it successfully received a message notification + **/ +static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->udelay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbevf_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * + * returns 0 if it successfully received a message acknowledgment + **/ +static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->udelay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbevf_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = -IXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = ixgbevf_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); +out: + return ret_val; +} + +/** + * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = -IXGBE_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbevf_poll_for_ack(hw); +out: + return ret_val; +} + +/** + * ixgbevf_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbevf_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
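+ * The tested bits are then cleared from the cached copy, so each status
+ * event is reported at most once.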
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (v2p_mailbox & mask)
+		ret_val = 0;
+
+	hw->mbx.v2p_mailbox &= ~mask;
+
+	return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+		ret_val = 0;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+		ret_val = 0;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set either reset bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+					   IXGBE_VFMAILBOX_RSTI))) {
+		ret_val = 0;
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	/* Take ownership of the buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+	/* reserve mailbox for VF use */
+	if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+		ret_val = 0;
+
+	return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent PF/VF race condition */
+	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	ixgbevf_check_for_msg_vf(hw);
+	ixgbevf_check_for_ack_vf(hw);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+	/* Drop VFU and interrupt the PF to tell it a message has been sent */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for the VF
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+	s32 ret_val = 0;
+	u16 i;
+
+	/* lock the mailbox to prevent PF/VF race condition */
+	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] =
IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for VF mailbox + */ +static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + +const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { + .init_params = ixgbevf_init_mbx_params_vf, + .read = ixgbevf_read_mbx_vf, + .write = ixgbevf_write_mbx_vf, + .read_posted = ixgbevf_read_posted_mbx, + .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, +}; + diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h b/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h new file mode 100644 index 000000000..82f44e06e --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -0,0 +1,128 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "vf.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. 
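+ * For example, IXGBE_VF_RESET below is a request sent by the VF, while
+ * IXGBE_PF_CONTROL_MSG originates in the PF.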
+ * Message results are the original message value or'd with one of the
+ * IXGBE_VT_MSGTYPE_* flags in the 0xF0000000 range below
+ */
+/* Messages below or'd with this are the ACK */
+#define IXGBE_VT_MSGTYPE_ACK	0x80000000
+/* Messages below or'd with this are the NACK */
+#define IXGBE_VT_MSGTYPE_NACK	0x40000000
+/* Indicates that VF is still clear to send requests */
+#define IXGBE_VT_MSGTYPE_CTS	0x20000000
+#define IXGBE_VT_MSGINFO_SHIFT	16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+/* definitions to support mailbox API version negotiation */
+
+/* each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	ixgbe_mbox_api_12,	/* API version 1.2, linux/freebsd VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
+#define IXGBE_VF_RESET		0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUE	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3 /* Indication of port VLAN */
+#define IXGBE_VF_DEF_QUEUE	4 /* Default queue offset */
+
+/* mailbox API, version 1.2 VF requests */
+#define IXGBE_VF_GET_RETA	0x0a	/* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY	0x0b	/* get RSS hash key */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN	4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD		3
+
+#define IXGBE_PF_CONTROL_MSG		0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY		500  /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/regs.h b/kernel/drivers/net/ethernet/intel/ixgbevf/regs.h
new file mode 100644
index 000000000..2764fd162
--- /dev/null
+++ b/kernel/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2015 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_REGS_H_ +#define _IXGBEVF_REGS_H_ + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) + +/* VFMRQC bits */ +#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) + +#endif /* _IXGBEVF_REGS_H_ */ diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c new file mode 100644 index 000000000..d1339b050 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -0,0 +1,752 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "vf.h" +#include "ixgbevf.h" + +/** + * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw) +{ + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return 0; +} + +/** + * ixgbevf_init_hw_vf - virtual function hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware and then starting + * the hardware + **/ +static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) +{ + s32 status = hw->mac.ops.start_hw(hw); + + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + return status; +} + +/** + * ixgbevf_reset_hw_vf - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts. + **/ +static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 timeout = IXGBE_VF_INIT_TIMEOUT; + s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; + u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* reset the api version */ + hw->api_version = ixgbe_mbox_api_10; + + IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); + IXGBE_WRITE_FLUSH(hw); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw) && timeout) { + timeout--; + udelay(5); + } + + if (!timeout) + return IXGBE_ERR_RESET_FAILED; + + /* mailbox timeout can now become active */ + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = IXGBE_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1); + + mdelay(10); + + /* set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ + ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); + if (ret_val) + return ret_val; + + /* New versions of the PF may NACK the reset return message + * to indicate that no MAC address has yet been assigned for + * the VF. + */ + if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && + msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_INVALID_MAC_ADDR; + + ether_addr_copy(hw->mac.perm_addr, addr); + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; + + return 0; +} + +/** + * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. 
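+ * Each queue is disabled individually, since the VF has no global
+ * receive or transmit enable of its own.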
The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+	u32 number_of_queues;
+	u32 reg_val;
+	u16 i;
+
+	/* Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit by stopping each queue */
+	number_of_queues = hw->mac.max_rx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		if (reg_val & IXGBE_RXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_RXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+		}
+	}
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts */
+	IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+	/* Disable the transmit unit. Each queue must be disabled. */
+	number_of_queues = hw->mac.max_tx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+		if (reg_val & IXGBE_TXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits from a multicast address that determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming Rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:	/* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:	/* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:	/* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:	/* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:	/* Invalid mc_filter_type */
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	ether_addr_copy(mac_addr, hw->mac.perm_addr);
+
+	return 0;
+}
+
+static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/* If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+	 * If it is zero then that tells the PF to just clear all of
+	 * this VF's macvlans and there is no new list.
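+	 * The index itself is passed to the PF in the IXGBE_VT_MSGINFO bits
+	 * of the message header.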
+	 */
+	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+	if (addr)
+		ether_addr_copy(msg_addr, addr);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] ==
+		    (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
+
+	return ret_val;
+}
+
+/**
+ * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
+ * @hw: pointer to the HW structure
+ * @reta: buffer to fill with RETA contents.
+ * @num_rx_queues: Number of Rx queues configured for this port
+ *
+ * The "reta" buffer should be big enough to contain 32 registers.
+ *
+ * Returns: 0 on success.
+ *          if API doesn't support this operation - (-EOPNOTSUPP).
+ */
+int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
+{
+	int err, i, j;
+	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+	u32 *hw_reta = &msgbuf[1];
+	u32 mask = 0;
+
+	/* We have to use a mailbox for 82599 and x540 devices only.
+	 * For these devices RETA has 128 entries.
+	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
+	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
+	 */
+	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
+
+	/* We support the RSS querying for 82599 and x540 devices only.
+	 * Thus return an error if API doesn't support RETA querying or querying
+	 * is not supported for this device type.
+	 */
+	if (hw->api_version != ixgbe_mbox_api_12 ||
+	    hw->mac.type >= ixgbe_mac_X550_vf)
+		return -EOPNOTSUPP;
+
+	msgbuf[0] = IXGBE_VF_GET_RETA;
+
+	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+
+	if (err)
+		return err;
+
+	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
+
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* If the operation has been refused by a PF return -EPERM */
+	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+		return -EPERM;
+
+	/* If we didn't get an ACK there must have been
+	 * some sort of mailbox error so we should treat it
+	 * as such.
+	 */
+	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
+		return IXGBE_ERR_MBX;
+
+	/* ixgbevf doesn't support more than 2 queues at the moment */
+	if (num_rx_queues > 1)
+		mask = 0x1;
+
+	for (i = 0; i < dwords; i++)
+		for (j = 0; j < 16; j++)
+			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
+
+	return 0;
+}
+
+/**
+ * ixgbevf_get_rss_key_locked - get the RSS Random Key
+ * @hw: pointer to the HW structure
+ * @rss_key: buffer to fill with RSS Hash Key contents.
+ *
+ * The "rss_key" buffer should be big enough to contain 10 registers.
+ *
+ * Returns: 0 on success.
+ *          if API doesn't support this operation - (-EOPNOTSUPP).
+ */
+int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
+{
+	int err;
+	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+
+	/* We currently support the RSS Random Key retrieval for 82599 and x540
+	 * devices only.
+	 *
+	 * Thus return an error if API doesn't support RSS Random Key retrieval
+	 * or if the operation is not supported for this device type.
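+	 * The key is returned as 10 32-bit words that follow the one-word
+	 * message header.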
+	 */
+	if (hw->api_version != ixgbe_mbox_api_12 ||
+	    hw->mac.type >= ixgbe_mac_X550_vf)
+		return -EOPNOTSUPP;
+
+	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
+	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+
+	if (err)
+		return err;
+
+	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
+
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* If the operation has been refused by a PF return -EPERM */
+	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
+		return -EPERM;
+
+	/* If we didn't get an ACK there must have been
+	 * some sort of mailbox error so we should treat it
+	 * as such.
+	 */
+	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
+		return IXGBE_ERR_MBX;
+
+	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
+
+	return 0;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+			      u32 vmdq)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+	ether_addr_copy(msg_addr, addr);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* if nacked the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+	return ret_val;
+}
+
+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+				       u32 *msg, u16 size)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+	s32 retval = mbx->ops.write_posted(hw, msg, size);
+
+	if (!retval)
+		mbx->ops.read_posted(hw, retmsg, size);
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+					  struct net_device *netdev)
+{
+	struct netdev_hw_addr *ha;
+	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+	u16 *vector_list = (u16 *)&msgbuf[1];
+	u32 cnt, i;
+
+	/* Each entry in the list uses 1 16 bit word. We have 30
+	 * 16 bit words available in our HW msg buffer (minus 1 for the
+	 * msg type). That's 30 hash values if we pack 'em right. If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multi-cast
+	 * addresses except in large enterprise network environments.
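+	 * Note that addresses beyond the 30-entry limit are simply not sent
+	 * to the PF.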
+	 */
+
+	cnt = netdev_mc_count(netdev);
+	if (cnt > 30)
+		cnt = 30;
+	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+	i = 0;
+	netdev_for_each_mc_addr(ha, netdev) {
+		if (i == cnt)
+			break;
+		if (is_link_local_ether_addr(ha->addr))
+			continue;
+
+		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
+	}
+
+	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+	return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			       bool vlan_on)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
+	msgbuf[0] = IXGBE_VF_SET_VLAN;
+	msgbuf[1] = vlan;
+	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2);
+	if (err)
+		goto mbx_err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2);
+	if (err)
+		goto mbx_err;
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+		err = IXGBE_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+	return err;
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+				     ixgbe_link_speed speed, bool autoneg,
+				     bool autoneg_wait_to_complete)
+{
+	return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed,
+				     bool *link_up,
+				     bool autoneg_wait_to_complete)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u32 links_reg;
+	u32 in_msg = 0;
+
+	/* If we were hit with a reset drop the link */
+	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+		mac->get_link_status = true;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down no point in checking to see if pf is up */
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	if (!(links_reg & IXGBE_LINKS_UP))
+		goto out;
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+	 * before the link status is correct
+	 */
+	if (mac->type == ixgbe_mac_82599_vf) {
+		int i;
+
+		for (i = 0; i < 5; i++) {
+			udelay(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+			if (!(links_reg & IXGBE_LINKS_UP))
+				goto out;
+		}
+	}
+
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		break;
+	}
+
+	/* if the read failed it could just be a mailbox collision, best wait
+	 * until we are called again and don't report an error
+	 */
+	if (mbx->ops.read(hw, &in_msg, 1))
+		goto out;
+
+	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+		/* msg is not CTS; if it is a NACK we must have lost CTS status */
+		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+			ret_val = -1;
+		goto out;
+	}
+
+	/* the PF is talking, if we timed out in the past we reinit */
+	if (!mbx->timeout) {
+		ret_val = -1;
+		goto out;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
+	mac->get_link_status = false;
+
+out:
+	*link_up = !mac->get_link_status;
+	return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = IXGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+	int err;
+	u32 msg[3];
+
+	/* Negotiate the mailbox API version */
+	msg[0] = IXGBE_VF_API_NEGOTIATE;
+	msg[1] = api;
+	msg[2] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 3);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 3);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/* Store value and return 0 on success */
+		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+			hw->api_version = api;
+			return 0;
+		}
+
+		err = IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	return err;
+}
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc)
+{
+	int err;
+	u32 msg[5];
+
+	/* do nothing if API doesn't support ixgbevf_get_queues */
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_11:
+	case ixgbe_mbox_api_12:
+		break;
+	default:
+		return 0;
+	}
+
+	/* Fetch queue configuration from the PF */
+	msg[0] = IXGBE_VF_GET_QUEUE;
+	msg[1] = msg[2] = msg[3] = msg[4] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 5);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 5);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/* if we didn't get an ACK there must have been
+		 * some sort of mailbox error so we should treat it
+		 * as such
+		 */
+		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+			return IXGBE_ERR_MBX;
+
+		/* record and validate values from message */
+		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+		if (hw->mac.max_tx_queues == 0 ||
+		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+		if (hw->mac.max_rx_queues == 0 ||
+		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+		/* in case of unknown state assume we cannot tag frames */
+		if (*num_tcs > hw->mac.max_rx_queues)
+			*num_tcs = 1;
+
+		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
+		/* default to queue 0 on out-of-bounds queue number */
+		if (*default_tc >= hw->mac.max_tx_queues)
+			*default_tc = 0;
+	}
+
+	return err;
+}
+
+static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
+	.init_hw		= ixgbevf_init_hw_vf,
+	.reset_hw		= ixgbevf_reset_hw_vf,
+	.start_hw		= ixgbevf_start_hw_vf,
+	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
+	.stop_adapter		=
ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_check_mac_link_vf, + .set_rar = ixgbevf_set_rar_vf, + .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .set_uc_addr = ixgbevf_set_uc_addr_vf, + .set_vfta = ixgbevf_set_vfta_vf, +}; + +const struct ixgbevf_info ixgbevf_82599_vf_info = { + .mac = ixgbe_mac_82599_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X540_vf_info = { + .mac = ixgbe_mac_X540_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550_vf_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h new file mode 100644 index 000000000..d40f036b6 --- /dev/null +++ b/kernel/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -0,0 +1,215 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+				  u32 *vmdq);
+struct ixgbe_mac_operations {
+	s32 (*init_hw)(struct ixgbe_hw *);
+	s32 (*reset_hw)(struct ixgbe_hw *);
+	s32 (*start_hw)(struct ixgbe_hw *);
+	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*stop_adapter)(struct ixgbe_hw *);
+	s32 (*get_bus_info)(struct ixgbe_hw *);
+
+	/* Link */
+	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+				     bool *);
+
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+	s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+	ixgbe_mac_unknown = 0,
+	ixgbe_mac_82599_vf,
+	ixgbe_mac_X540_vf,
+	ixgbe_mac_X550_vf,
+	ixgbe_mac_X550EM_x_vf,
+	ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations ops;
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	enum ixgbe_mac_type type;
+
+	s32 mc_filter_type;
+
+	bool get_link_status;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+	s32 (*init_params)(struct ixgbe_hw *hw);
+	s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*check_for_msg)(struct ixgbe_hw *);
+	s32 (*check_for_ack)(struct ixgbe_hw *);
+	s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+	struct ixgbe_mbx_operations ops;
+	struct ixgbe_mbx_stats stats;
+	u32 timeout;
+	u32 udelay;
+	u32 v2p_mailbox;
+	u16 size;
+};
+
+struct ixgbe_hw {
+	void *back;
+
+	u8 __iomem *hw_addr;
+
+	struct ixgbe_mac_info mac;
+	struct ixgbe_mbx_info mbx;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8 revision_id;
+	bool adapter_stopped;
+
+	int api_version;
+};
+
+struct ixgbevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+struct ixgbevf_info {
+	enum ixgbe_mac_type mac;
+	const struct ixgbe_mac_operations *mac_ops;
+};
+
+#define IXGBE_FAILED_READ_REG 0xffffffffU
+
+#define IXGBE_REMOVED(a) unlikely(!(a))
+
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32
value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + + if (IXGBE_REMOVED(reg_addr)) + return; + writel(value, reg_addr + reg); +} + +#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) + +u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); +#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) + +static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, + u32 offset, u32 value) +{ + ixgbe_write_reg(hw, reg + (offset << 2), value); +} + +#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) + +static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, + u32 offset) +{ + return ixgbevf_read_reg(hw, reg + (offset << 2)); +} + +#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) + +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc); +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key); +#endif /* __IXGBE_VF_H__ */ -- cgit 1.2.3-korg