path: root/kernel/drivers/net/ethernet/qlogic
author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/net/ethernet/qlogic
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt, and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way, which is not good. We should use
another OPNFV project repo in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/qlogic')
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/Kconfig | 96
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/Makefile | 8
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/Makefile | 27
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 1889
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 941
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 959
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 1079
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2593
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h | 285
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 1933
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3526
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qla3xxx.c | 3949
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qla3xxx.h | 1189
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/Makefile | 15
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2404
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4165
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 661
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2616
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 284
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 1428
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1147
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 122
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 1882
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 948
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 1710
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 225
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1310
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2230
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4341
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 1414
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 276
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2214
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 2047
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 1438
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlge/Makefile | 7
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlge/qlge.h | 2338
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2042
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 735
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5016
-rw-r--r-- kernel/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1284
40 files changed, 62773 insertions, 0 deletions
diff --git a/kernel/drivers/net/ethernet/qlogic/Kconfig b/kernel/drivers/net/ethernet/qlogic/Kconfig
new file mode 100644
index 000000000..d49cba129
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/Kconfig
@@ -0,0 +1,96 @@
+#
+# QLogic network device configuration
+#
+
+config NET_VENDOR_QLOGIC
+ bool "QLogic devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about QLogic cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_QLOGIC
+
+config QLA3XXX
+ tristate "QLogic QLA3XXX Network Driver Support"
+ depends on PCI
+ ---help---
+ This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qla3xxx.
+
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ select FW_LOADER
+ ---help---
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
+config QLCNIC_SRIOV
+ bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+ depends on QLCNIC && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QLE83XX Converged Ethernet devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
+config QLCNIC_DCB
+ bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+ depends on QLCNIC && DCB
+ default y
+ ---help---
+ This configuration parameter enables DCB support in QLE83XX
+ and QLE82XX Converged Ethernet devices. This allows for DCB
+ get operations support through rtNetlink interface. Only CEE
+ mode of DCB is supported. PG and PFC values are related only
+ to Tx.
+
+config QLCNIC_VXLAN
+ bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
+ default n
+ depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for VXLAN protocol over QLogic's
+ 84XX series adapters.
+ Say Y here if you want to enable hardware offload support for
+ Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ ---help---
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
+config QLGE
+ tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+ depends on PCI
+ ---help---
+ This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qlge.
+
+config NETXEN_NIC
+ tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+ depends on PCI
+ select FW_LOADER
+ ---help---
+ This enables the support for NetXen's Gigabit Ethernet card.
+
+endif # NET_VENDOR_QLOGIC
diff --git a/kernel/drivers/net/ethernet/qlogic/Makefile b/kernel/drivers/net/ethernet/qlogic/Makefile
new file mode 100644
index 000000000..b2a283d9a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the QLogic network device drivers.
+#
+
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
+obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_NETXEN_NIC) += netxen/
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/Makefile b/kernel/drivers/net/ethernet/qlogic/netxen/Makefile
new file mode 100644
index 000000000..e14e60c88
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -0,0 +1,27 @@
+# Copyright (C) 2003 - 2009 NetXen, Inc.
+# Copyright (C) 2009 - QLogic Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution
+# in the file called "COPYING".
+#
+#
+
+
+obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
+
+netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
+ netxen_nic_ethtool.o netxen_nic_ctx.o
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
new file mode 100644
index 000000000..0a5e204a0
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -0,0 +1,1889 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _NETXEN_NIC_H_
+#define _NETXEN_NIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include "netxen_nic_hdr.h"
+#include "netxen_nic_hw.h"
+
+#define _NETXEN_NIC_LINUX_MAJOR 4
+#define _NETXEN_NIC_LINUX_MINOR 0
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
+
+#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define NETXEN_DECODE_VERSION(v) \
+ NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
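
The flash image keeps major in bits 7:0, minor in 15:8 and build in 31:16, while NETXEN_VERSION_CODE packs major into the top byte. A minimal standalone sketch of the round trip (editorial, not part of the patch; the raw word 0x00520004 is a hypothetical example chosen to decode to the 4.0.82 driver version defined above):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x00520004;	/* hypothetical flash version word */
	unsigned int v = ((raw & 0xff) << 24)	/* NETXEN_DECODE_VERSION(raw) */
		       + (((raw >> 8) & 0xff) << 16)
		       + (raw >> 16);
	printf("%u.%u.%u\n", (v >> 24) & 0xff, (v >> 16) & 0xff, v & 0xffff);
	return 0;	/* prints 4.0.82 */
}
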
+
+#define NETXEN_NUM_FLASH_SECTORS (64)
+#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
+#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \
+ * NETXEN_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
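
find_diff_among() computes the forward (wrapping) distance from index a to index b on a circular ring of `range` slots. A small self-checking sketch (editorial, not part of the patch):

#include <assert.h>
#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))

int main(void)
{
	assert(find_diff_among(2, 6, 8) == 4);	/* no wrap: 6 - 2     */
	assert(find_diff_among(6, 2, 8) == 4);	/* wrap:    2 + 8 - 6 */
	return 0;
}
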
+
+#define NETXEN_RCV_PRODUCER_OFFSET 0
+#define NETXEN_RCV_PEG_DB_ID 2
+#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
+#define FLASH_SUCCESS 0
+
+#define ADDR_IN_WINDOW1(off) \
+ ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+/*
+ * normalize a 64MB crb address to 32MB PCI window
+ * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define NETXEN_CRB_NORMAL(reg) \
+ ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
+
+#define NETXEN_CRB_NORMALIZE(adapter, reg) \
+ pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
+
+#define DB_NORMALIZE(adapter, off) \
+ (adapter->ahw.db_base + (off))
+
+#define NX_P2_C0 0x24
+#define NX_P2_C1 0x25
+#define NX_P3_A0 0x30
+#define NX_P3_A2 0x30
+#define NX_P3_B0 0x40
+#define NX_P3_B1 0x41
+#define NX_P3_B2 0x42
+#define NX_P3P_A0 0x50
+
+#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
+#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
+#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define SECOND_PAGE_GROUP_START 0x6000000
+#define SECOND_PAGE_GROUP_END 0x68BC000
+
+#define THIRD_PAGE_GROUP_START 0x70E4000
+#define THIRD_PAGE_GROUP_END 0x8000000
+
+#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START
+#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
+#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
+
+#define P2_MAX_MTU (8000)
+#define P3_MAX_MTU (9600)
+#define NX_ETHERMTU 1500
+#define NX_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define NX_P2_RX_BUF_MAX_LEN 1760
+#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU)
+#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU)
+#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU)
+#define NX_CT_DEFAULT_RX_BUF_LEN 2048
+#define NX_LRO_BUFFER_EXTRA 2048
+
+#define NX_RX_LRO_BUFFER_LENGTH (8060)
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* The following opcodes are for internal consumption. */
+#define NETXEN_CONTROL_OP 0x10
+#define PEGNET_REQUEST 0x11
+
+#define MAX_NUM_CARDS 4
+
+#define NETXEN_MAX_FRAGS_PER_TX 14
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
+#define NX_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_START 0xff00
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
+#define NETXEN_CTX_SIGNATURE 0xdee0
+#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0
+#define NETXEN_CTX_RESET 0xbad0
+#define NETXEN_CTX_D3_RESET 0xacc0
+#define NETXEN_RCV_PRODUCER(ringid) (ringid)
+
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE 0xff00
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+#define get_index_range(index,length,count) \
+ (((index) + (count)) & ((length) - 1))
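
Both helpers assume the ring length is a power of two (as all the descriptor counts above are), so masking with length - 1 is a cheap modulo. A standalone sketch (editorial, not part of the patch):

#include <assert.h>
#define get_next_index(index, length)		(((index) + 1) & ((length) - 1))
#define get_index_range(index, length, count)	(((index) + (count)) & ((length) - 1))

int main(void)
{
	assert(get_next_index(1023, 1024) == 0);	/* producer wraps to 0  */
	assert(get_index_range(1020, 1024, 8) == 4);	/* (1020 + 8) mod 1024  */
	return 0;
}
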
+
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+#define NX_MAX_PCI_FUNC 8
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ * Bit 0-1 : peg_id => 0x2 for tx and 0x1 for rx
+ * Bit 2 : priv_id => must be 1
+ * Bit 3-17 : count => for doorbell
+ * Bit 18-27 : ctx_id => Context id
+ * Bit 28-31 : opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define netxen_set_msg_peg_id(config_word, val) \
+ ((config_word) &= ~3, (config_word) |= val & 3)
+#define netxen_set_msg_privid(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_set_msg_count(config_word, val) \
+ ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
+#define netxen_set_msg_ctxid(config_word, val) \
+ ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
+#define netxen_set_msg_opcode(config_word, val) \
+ ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
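
The setters above pack the doorbell word laid out in the comment: peg id in bits 1:0, the mandatory privilege bit at 2, count in 17:3, context id in 27:18 and opcode in 31:28. An editorial sketch of composing one (not part of the patch; the field values are hypothetical, the real driver fills them from ring state):

static inline netxen_ctx_msg netxen_example_db_msg(void)
{
	netxen_ctx_msg msg = 0;

	netxen_set_msg_peg_id(msg, 1);		/* bits  1:0  peg id (0x1 = rx)   */
	netxen_set_msg_privid(msg);		/* bit   2    must be set         */
	netxen_set_msg_count(msg, 32);		/* bits 17:3  doorbell count      */
	netxen_set_msg_ctxid(msg, 0);		/* bits 27:18 context id          */
	netxen_set_msg_opcode(msg, 0x1);	/* bits 31:28 hypothetical opcode */
	return msg;
}
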
+
+struct netxen_rcv_ring {
+ __le64 addr;
+ __le32 size;
+ __le32 rsrvd;
+};
+
+struct netxen_sts_ring {
+ __le64 addr;
+ __le32 size;
+ __le16 msi_index;
+ __le16 rsvd;
+} ;
+
+struct netxen_ring_ctx {
+
+ /* one command ring */
+ __le64 cmd_consumer_offset;
+ __le64 cmd_ring_addr;
+ __le32 cmd_ring_size;
+ __le32 rsrvd;
+
+ /* three receive rings */
+ struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
+
+ __le64 sts_ring_addr;
+ __le32 sts_ring_size;
+
+ __le32 ctx_id;
+
+ __le64 rsrvd_2[3];
+ __le32 sts_ring_count;
+ __le32 rsrvd_3;
+ struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
+} __attribute__ ((aligned(64)));
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
+ * them only when doing LSO (i.e. for packets larger than the 1500-byte MTU).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for LSO packets
+ */
+
+#define FLAGS_CHECKSUM_ENABLED 0x01
+#define FLAGS_LSO_ENABLED 0x02
+#define FLAGS_IPSEC_SA_ADD 0x04
+#define FLAGS_IPSEC_SA_DELETE 0x08
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define netxen_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+
+#define netxen_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define netxen_set_tx_port(_desc, _port) \
+ (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
+
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ (_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
+
+#define netxen_set_tx_frags_len(_desc, _frags, _len) \
+ (_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
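
An editorial sketch (not part of the patch) of how the helper macros above populate the first TX descriptor for a plain single-fragment Ethernet frame, with no LSO or checksum offload; the function name and values are hypothetical:

static inline void netxen_example_fill_desc(struct cmd_desc_type0 *desc,
					    u16 handle, u32 len, u8 port)
{
	netxen_set_tx_frags_len(desc, 1, len);	/* 1 fragment, total length  */
	netxen_set_tx_port(desc, port);		/* port in both nibbles      */
	netxen_set_tx_flags_opcode(desc, 0, TX_ETHER_PKT); /* no flags       */
	desc->reference_handle = cpu_to_le16(handle);	/* sw buffer handle  */
}
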
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define NETXEN_NIC_SYN_OFFLOAD 0x03
+#define NETXEN_NIC_RXPKT_DESC 0x04
+#define NETXEN_OLD_RXPKT_DESC 0x3f
+#define NETXEN_NIC_RESPONSE_DESC 0x05
+#define NETXEN_NIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM (1)
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define netxen_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define netxen_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define netxen_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define netxen_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define netxen_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define netxen_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define netxen_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define netxen_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define netxen_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define netxen_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define netxen_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define netxen_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+#define netxen_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
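
An editorial sketch (not part of the patch) of decoding the first status word with the getters above, following the documented 0-63 bit layout:

static inline void netxen_example_parse_sts(u64 sts_data)
{
	int port   = netxen_get_sts_port(sts_data);		/* bits  3:0  */
	int status = netxen_get_sts_status(sts_data);		/* bits  7:4  */
	int length = netxen_get_sts_totallength(sts_data);	/* bits 27:12 */
	int handle = netxen_get_sts_refhandle(sts_data);	/* bits 43:28 */
	int opcode = netxen_get_sts_opcode(sts_data);		/* bits 63:58 */

	(void)port; (void)status; (void)length; (void)handle; (void)opcode;
}
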
+
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE *************************/
+#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define NX_UNI_DIR_SECT_BOOTLD 0x6
+#define NX_UNI_DIR_SECT_FW 0x7
+
+/*Offsets */
+#define NX_UNI_CHIP_REV_OFF 10
+#define NX_UNI_FLAGS_OFF 11
+#define NX_UNI_BIOS_VERSION_OFF 12
+#define NX_UNI_BOOTLD_IDX_OFF 27
+#define NX_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc{
+ uint32_t findex;
+ uint32_t num_entries;
+ uint32_t entry_size;
+ uint32_t reserved[5];
+};
+
+struct uni_data_desc{
+ uint32_t findex;
+ uint32_t size;
+ uint32_t reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
+/* The version of the main data structure */
+#define NETXEN_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define NETXEN_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define NETXEN_MAX_PORTS 4
+
+#define NETXEN_BRDTYPE_P1_BD 0x0000
+#define NETXEN_BRDTYPE_P1_SB 0x0001
+#define NETXEN_BRDTYPE_P1_SMAX 0x0002
+#define NETXEN_BRDTYPE_P1_SOCK 0x0003
+
+#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008
+#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009
+#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a
+#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b
+#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c
+
+#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d
+#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e
+#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f
+
+#define NETXEN_BRDTYPE_P3_REF_QG 0x0021
+#define NETXEN_BRDTYPE_P3_HMEZ 0x0022
+#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define NETXEN_BRDTYPE_P3_4_GB 0x0024
+#define NETXEN_BRDTYPE_P3_IMEZ 0x0025
+#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027
+#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028
+#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029
+#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031
+#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032
+#define NETXEN_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define NETXEN_CRBINIT_START 0 /* crbinit section */
+#define NETXEN_BRDCFG_START 0x4000 /* board config */
+#define NETXEN_INITCODE_START 0x6000 /* pegtune code */
+#define NETXEN_BOOTLD_START 0x10000 /* bootld */
+#define NETXEN_IMAGE_START 0x43000 /* compressed image */
+#define NETXEN_SECONDARY_START 0x200000 /* backup images */
+#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
+#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
+#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
+
+#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START)
+#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408)
+#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c)
+#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418)
+#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c)
+#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c)
+
+#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START)
+#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8)
+#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128)
+
+#define NX_FW_MIN_SIZE (0x3fffff)
+#define NX_P2_MN_ROMIMAGE 0
+#define NX_P3_CT_ROMIMAGE 1
+#define NX_P3_MN_ROMIMAGE 2
+#define NX_UNIFIED_ROMIMAGE 3
+#define NX_FLASH_ROMIMAGE 4
+#define NX_UNKNOWN_ROMIMAGE 0xff
+
+#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
+#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
+#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
+#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define NX_FLASH_ROMIMAGE_NAME "flash"
+
+extern char netxen_nic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * netxen_skb_frag{} is to contain mapping info for each SG list. This
+ * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}.
+ */
+struct netxen_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct netxen_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define NETXEN_BUFFER_FREE 0
+#define NETXEN_BUFFER_BUSY 1
+
+/*
+ * There will be one netxen_buffer per skb packet. These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct netxen_cmd_buffer {
+ struct sk_buff *skb;
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct netxen_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define NETXEN_NIC_GBE 0x01
+#define NETXEN_NIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter;
+ * contains interrupt info as well as shared hardware info.
+ */
+struct netxen_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *pci_base1;
+ void __iomem *pci_base2;
+ void __iomem *db_base;
+ void __iomem *ocm_win_crb;
+
+ unsigned long db_len;
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ spinlock_t mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+
+struct netxen_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+/*
+ * Rcv Descriptor Context. One such context per Rcv Descriptor ring. There may
+ * be one ring for normal packets, one for jumbo packets, and possibly others.
+ */
+struct nx_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct netxen_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct nx_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct netxen_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct nx_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ void __iomem *crb_cmd_consumer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * receive processing must be in this structure. Global data may be
+ * present elsewhere.
+ */
+struct netxen_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct nx_host_rds_ring *rds_rings;
+ struct nx_host_sds_ring *sds_rings;
+
+ struct netxen_ring_ctx *hwctx;
+ dma_addr_t phys_addr;
+};
+
+struct _cdrp_cmd {
+ u32 cmd;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+struct netxen_cmd_args {
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+};
+
+/* New HW context creation */
+
+#define NX_OS_CRB_RETRY_COUNT 4000
+#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define NX_CDRP_CLEAR 0x00000000
+#define NX_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the NX_CDRP_CMD_BIT cleared
+ * in the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_RSP(rsp) (rsp)
+#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0)
+
+#define NX_CDRP_RSP_OK 0x00000001
+#define NX_CDRP_RSP_FAIL 0x00000002
+#define NX_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the NX_CDRP_CMD_BIT set in
+ * the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd))
+#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0)
+
+#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define NX_CDRP_CMD_SET_MTU 0x00000012
+#define NX_CDRP_CMD_READ_PHY 0x00000013
+#define NX_CDRP_CMD_WRITE_PHY 0x00000014
+#define NX_CDRP_CMD_READ_HW_REG 0x00000015
+#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f
+#define NX_CDRP_CMD_MAX 0x00000020
+
+#define NX_RCODE_SUCCESS 0
+#define NX_RCODE_NO_HOST_MEM 1
+#define NX_RCODE_NO_HOST_RESOURCE 2
+#define NX_RCODE_NO_CARD_CRB 3
+#define NX_RCODE_NO_CARD_MEM 4
+#define NX_RCODE_NO_CARD_RESOURCE 5
+#define NX_RCODE_INVALID_ARGS 6
+#define NX_RCODE_INVALID_ACTION 7
+#define NX_RCODE_INVALID_STATE 8
+#define NX_RCODE_NOT_SUPPORTED 9
+#define NX_RCODE_NOT_PERMITTED 10
+#define NX_RCODE_NOT_READY 11
+#define NX_RCODE_DOES_NOT_EXIST 12
+#define NX_RCODE_ALREADY_EXISTS 13
+#define NX_RCODE_BAD_SIGNATURE 14
+#define NX_RCODE_CMD_NOT_IMPL 15
+#define NX_RCODE_CMD_INVALID 16
+#define NX_RCODE_TIMEOUT 17
+#define NX_RCODE_CMD_FAILED 18
+#define NX_RCODE_MAX_EXCEEDED 19
+#define NX_RCODE_MAX 20
+
+#define NX_DESTROY_CTX_RESET 0
+#define NX_DESTROY_CTX_D3_RESET 1
+#define NX_DESTROY_CTX_MAX 2
+
+/*
+ * Capabilities
+ */
+#define NX_CAP_BIT(class, bit) (1 << bit)
+#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0)
+#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1)
+#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2)
+#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3)
+#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4)
+#define NX_CAP0_LRO NX_CAP_BIT(0, 5)
+#define NX_CAP0_LSO NX_CAP_BIT(0, 6)
+#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7)
+#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8)
+#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10)
+#define NX_CAP0_HW_LRO_MSS NX_CAP_BIT(0, 21)
+
+/*
+ * Context state
+ */
+#define NX_HOST_CTX_STATE_FREED 0
+#define NX_HOST_CTX_STATE_ALLOCATED 1
+#define NX_HOST_CTX_STATE_ACTIVE 2
+#define NX_HOST_CTX_STATE_DISABLED 3
+#define NX_HOST_CTX_STATE_QUIESCED 4
+#define NX_HOST_CTX_STATE_MAX 5
+
+/*
+ * Rx context
+ */
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+} nx_hostrq_sds_ring_t;
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+} nx_hostrq_rds_ring_t;
+
+typedef struct {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+} nx_hostrq_rx_ctx_t;
+
+typedef struct {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+} nx_cardrsp_rds_ring_t;
+
+typedef struct {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_sds_ring_t;
+
+typedef struct {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrs_sds_rings */
+ char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
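
The rx context request and response are variable-length: a fixed header followed by per-ring descriptors packed starting at data[0], which is what the SIZEOF_* macros above account for when sizing the DMA buffers. An editorial sketch for the default 3-RDS/4-SDS layout, as the sizes would be computed before issuing NX_CDRP_CMD_CREATE_RX_CTX (the variable names are hypothetical):

size_t rq_size  = SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t,
				   NUM_RCV_DESC_RINGS, NUM_STS_DESC_RINGS);
size_t rsp_size = SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t,
				    NUM_RCV_DESC_RINGS, NUM_STS_DESC_RINGS);
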
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE 0
+#define NX_HOST_RDS_CRB_MODE_SHARED 1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM 2
+#define NX_HOST_RDS_CRB_MODE_MAX 3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE 0
+#define NX_HOST_INT_CRB_MODE_SHARED 1
+#define NX_HOST_INT_CRB_MODE_NORX 2
+#define NX_HOST_INT_CRB_MODE_NOTX 3
+#define NX_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P2 16
+#define MC_COUNT_P3 38
+
+#define NETXEN_MAC_NOOP 0
+#define NETXEN_MAC_ADD 1
+#define NETXEN_MAC_DEL 2
+
+typedef struct nx_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+} nx_mac_list_t;
+
+struct nx_ip_list {
+ struct list_head list;
+ __be32 ip_addr;
+ bool master;
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and
+ * are adjusted based on the configured MTU.
+ */
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define NETXEN_NIC_INTR_DEFAULT 0x04
+
+typedef union {
+ struct {
+ uint16_t rx_packets;
+ uint16_t rx_time_us;
+ uint16_t tx_packets;
+ uint16_t tx_time_us;
+ } data;
+ uint64_t word;
+} nx_nic_intr_coalesce_data_t;
+
+typedef struct {
+ uint16_t stats_time_us;
+ uint16_t rate_sample_time;
+ uint16_t flags;
+ uint16_t rsvd_1;
+ uint32_t low_threshold;
+ uint32_t high_threshold;
+ nx_nic_intr_coalesce_data_t normal;
+ nx_nic_intr_coalesce_data_t low;
+ nx_nic_intr_coalesce_data_t high;
+ nx_nic_intr_coalesce_data_t irq;
+} nx_nic_intr_coalesce_t;
+
+#define NX_HOST_REQUEST 0x13
+#define NX_NIC_REQUEST 0x14
+
+#define NX_MAC_EVENT 0x1
+
+#define NX_IP_UP 2
+#define NX_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define NX_NIC_H2C_OPCODE_START 0
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define NX_NIC_H2C_OPCODE_CONFIG_LED 4
+#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7
+#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16
+#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21
+#define NX_NIC_C2C_OPCODE 22
+#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define NX_NIC_H2C_OPCODE_LAST 25
+
+/*
+ * Firmware --> Driver
+ */
+
+#define NX_NIC_C2H_OPCODE_START 128
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define NX_NIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define NX_NIC_LRO_REQUEST_FIRST 0
+#define NX_NIC_LRO_REQUEST_ADD_FLOW 1
+#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2
+#define NX_NIC_LRO_REQUEST_TIMER 3
+#define NX_NIC_LRO_REQUEST_CLEANUP 4
+#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5
+#define NX_TOE_LRO_REQUEST_ADD_FLOW 6
+#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9
+#define NX_TOE_LRO_REQUEST_TIMER 10
+#define NX_NIC_LRO_REQUEST_LAST 11
+
+#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5)
+#define NX_FW_CAPABILITY_SWITCHING (1 << 6)
+#define NX_FW_CAPABILITY_PEXQ (1 << 7)
+#define NX_FW_CAPABILITY_BDG (1 << 8)
+#define NX_FW_CAPABILITY_FVLANTX (1 << 9)
+#define NX_FW_CAPABILITY_HW_LRO (1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11)
+#define NX_FW_CAPABILITY_MORE_CAPS (1 << 31)
+#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG (1 << 2)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0xEF10AF12
+#define AUTO_FW_RESET_DISABLED 0xDCBAAF12
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define netxen_get_nic_msgtype(msg_hdr) \
+ ((msg_hdr >> 58) & 0x3F)
+#define netxen_get_nic_msg_compid(msg_hdr) \
+ ((msg_hdr >> 40) & 0xFF)
+#define netxen_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
+#define netxen_get_nic_msg_errcode(msg_hdr) \
+ ((msg_hdr >> 16) & 0xFFFF)
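
An editorial sketch (not part of the patch) of splitting a firmware response header into the fields documented above; the helper name is hypothetical:

static inline u16 netxen_example_msg_errcode(u64 msg_hdr)
{
	/* bits 39:32 identify the opcode, bits 31:16 carry the error code */
	u8 opcode = netxen_get_nic_msg_opcode(msg_hdr);

	(void)opcode;
	return netxen_get_nic_msg_errcode(msg_hdr);
}
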
+
+typedef struct {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+} nx_fw_msg_t;
+
+typedef struct {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+} nx_nic_req_t;
+
+typedef struct {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+} nx_mac_req_t;
+
+#define MAX_PENDING_DESC_BLOCK_SIZE 64
+
+#define NETXEN_NIC_MSI_ENABLED 0x02
+#define NETXEN_NIC_MSIX_ENABLED 0x04
+#define NETXEN_NIC_LRO_ENABLED 0x08
+#define NETXEN_NIC_LRO_DISABLED 0x00
+#define NETXEN_NIC_BRIDGE_ENABLED 0x10
+#define NETXEN_NIC_DIAG_ENABLED 0x20
+#define NETXEN_FW_RESET_OWNER 0x40
+#define NETXEN_FW_MSS_CAP 0x80
+#define NETXEN_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define NETXEN_MSIX_TBL_SPACE 8192
+#define NETXEN_PCI_REG_MSIX_TBL 0x44
+
+#define NETXEN_DB_MAPSIZE_BYTES 0x1000
+
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
+
+#define __NX_FW_ATTACHED 0
+#define __NX_DEV_UP 1
+#define __NX_RESETTING 2
+
+/* Mini Coredump FW supported version */
+#define NX_MD_SUPPORT_MAJOR 4
+#define NX_MD_SUPPORT_MINOR 0
+#define NX_MD_SUPPORT_SUBVERSION 579
+
+#define LSW(x) ((uint16_t)(x))
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
+/* Mini Coredump mask level */
+#define NX_DUMP_MASK_MIN 0x03
+#define NX_DUMP_MASK_DEF 0x1f
+#define NX_DUMP_MASK_MAX 0xff
+
+/* Mini Coredump CDRP commands */
+#define NX_CDRP_CMD_TEMP_SIZE 0x0000002f
+#define NX_CDRP_CMD_GET_TEMP_HDR 0x00000030
+
+
+#define NX_DUMP_STATE_ARRAY_LEN 16
+#define NX_DUMP_CAP_SIZE_ARRAY_LEN 8
+
+/* Mini Coredump sysfs entries flags*/
+#define NX_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define NX_ENABLE_FW_DUMP 0xaddfeed
+#define NX_DISABLE_FW_DUMP 0xbadfeed
+#define NX_FORCE_FW_RESET 0xdeaddead
+
+
+/* Fw dump levels */
+static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
+/* Flash read/write address */
+#define NX_FW_DUMP_REG1 0x00130060
+#define NX_FW_DUMP_REG2 0x001e0000
+#define NX_FLASH_SEM2_LK 0x0013C010
+#define NX_FLASH_SEM2_ULK 0x0013C014
+#define NX_FLASH_LOCK_ID 0x001B2100
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+/* Mini Coredump register read/write routine */
+#define NX_RD_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
+ NX_FW_DUMP_REG1)); \
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
+ *data = readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + \
+ LSW(addr))); \
+} while (0)
+
+#define NX_WR_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
+ NX_FW_DUMP_REG1)); \
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
+ writel(data, (void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));\
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr))); \
+} while (0)
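
Both macros implement windowed register access through BAR0: the upper 16 bits of addr select the window via NX_FW_DUMP_REG1, the dummy readl() flushes that posted write, and the data cycle then goes through NX_FW_DUMP_REG2 plus the low 16 bits of addr. A hypothetical usage sketch (editorial, not part of the patch), reading the flash-lock-owner register defined above, assuming bar0 is the mapped BAR0 base (ahw.pci_base0 in this driver):

u32 owner;

NX_RD_DUMP_REG(NX_FLASH_LOCK_ID, adapter->ahw.pci_base0, &owner);
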
+
+
+/*
+ * Entry Type Defines
+ */
+
+#define RDNOP 0
+#define RDCRB 1
+#define RDMUX 2
+#define QUEUE 3
+#define BOARD 4
+#define RDSRE 5
+#define RDOCM 6
+#define PREGS 7
+#define L1DTG 8
+#define L1ITG 9
+#define CACHE 10
+
+#define L1DAT 11
+#define L1INS 12
+#define RDSTK 13
+#define RDCON 14
+
+#define L2DTG 21
+#define L2ITG 22
+#define L2DAT 23
+#define L2INS 24
+#define RDOC3 25
+
+#define MEMBK 32
+
+#define RDROM 71
+#define RDMEM 72
+#define RDMN 73
+
+#define INFOR 81
+#define CNTRL 98
+
+#define TLHDR 99
+#define RDEND 255
+
+#define PRIMQ 103
+#define SQG2Q 104
+#define SQG3Q 105
+
+/*
+ * Opcodes for Control Entries.
+ * These flags are bit fields.
+ */
+#define NX_DUMP_WCRB 0x01
+#define NX_DUMP_RWCRB 0x02
+#define NX_DUMP_ANDCRB 0x04
+#define NX_DUMP_ORCRB 0x08
+#define NX_DUMP_POLLCRB 0x10
+#define NX_DUMP_RD_SAVE 0x20
+#define NX_DUMP_WRT_SAVED 0x40
+#define NX_DUMP_MOD_SAVE_ST 0x80
+
+/* Driver Flags */
+#define NX_DUMP_SKIP 0x80 /* driver skipped this entry */
+#define NX_DUMP_SIZE_ERR 0x40 /*entry size vs capture size mismatch*/
+
+#define NX_PCI_READ_32(ADDR) readl((ADDR))
+#define NX_PCI_WRITE_32(DATA, ADDR) writel(DATA, (ADDR))
+
+
+
+struct netxen_minidump {
+ u32 pos; /* position in the dump buffer */
+ u8 fw_supports_md; /* FW supports Mini Coredump */
+ u8 has_valid_dump; /* indicates valid dump */
+ u8 md_capture_mask; /* driver capture mask */
+ u8 md_enabled; /* Turn Mini Coredump on/off */
+ u32 md_dump_size; /* Total FW Mini Coredump size */
+ u32 md_capture_size; /* FW dump capture size */
+ u32 md_template_size; /* FW template size */
+ u32 md_template_ver; /* FW template version */
+ u64 md_timestamp; /* FW Mini dump timestamp */
+ void *md_template; /* FW template will be stored */
+ void *md_capture_buff; /* FW dump will be stored */
+};
+
+
+
+struct netxen_minidump_template_hdr {
+ u32 entry_type;
+ u32 first_entry_offset;
+ u32 size_of_template;
+ u32 capture_mask;
+ u32 num_of_entries;
+ u32 version;
+ u32 driver_timestamp;
+ u32 checksum;
+ u32 driver_capture_mask;
+ u32 driver_info_word2;
+ u32 driver_info_word3;
+ u32 driver_info_word4;
+ u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
+ u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
+ u32 rsvd[0];
+};
+
+/* Common Entry Header: Common to All Entry Types */
+/*
+ * Driver Code is for driver to write some info about the entry.
+ * Currently not used.
+ */
+
+struct netxen_common_entry_hdr {
+ u32 entry_type;
+ u32 entry_size;
+ u32 entry_capture_size;
+ union {
+ struct {
+ u8 entry_capture_mask;
+ u8 entry_code;
+ u8 driver_code;
+ u8 driver_flags;
+ };
+ u32 entry_ctrl_word;
+ };
+};
+
+
+/* Generic Entry Including Header */
+struct netxen_minidump_entry {
+ struct netxen_common_entry_hdr hdr;
+ u32 entry_data00;
+ u32 entry_data01;
+ u32 entry_data02;
+ u32 entry_data03;
+ u32 entry_data04;
+ u32 entry_data05;
+ u32 entry_data06;
+ u32 entry_data07;
+};
+
+/* Read ROM Header */
+struct netxen_minidump_entry_rdrom {
+ struct netxen_common_entry_hdr h;
+ union {
+ struct {
+ u32 select_addr_reg;
+ };
+ u32 rsvd_0;
+ };
+ union {
+ struct {
+ u8 addr_stride;
+ u8 addr_cnt;
+ u16 data_size;
+ };
+ u32 rsvd_1;
+ };
+ union {
+ struct {
+ u32 op_count;
+ };
+ u32 rsvd_2;
+ };
+ union {
+ struct {
+ u32 read_addr_reg;
+ };
+ u32 rsvd_3;
+ };
+ union {
+ struct {
+ u32 write_mask;
+ };
+ u32 rsvd_4;
+ };
+ union {
+ struct {
+ u32 read_mask;
+ };
+ u32 rsvd_5;
+ };
+ u32 read_addr;
+ u32 read_data_size;
+};
+
+
+/* Read CRB and Control Entry Header */
+struct netxen_minidump_entry_crb {
+ struct netxen_common_entry_hdr h;
+ u32 addr;
+ union {
+ struct {
+ u8 addr_stride;
+ u8 state_index_a;
+ u16 poll_timeout;
+ };
+ u32 addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ union {
+ struct {
+ u8 opcode;
+ u8 state_index_v;
+ u8 shl;
+ u8 shr;
+ };
+ u32 control_value;
+ };
+ u32 value_1;
+ u32 value_2;
+ u32 value_3;
+};
+
+/* Read Memory and MN Header */
+struct netxen_minidump_entry_rdmem {
+ struct netxen_common_entry_hdr h;
+ union {
+ struct {
+ u32 select_addr_reg;
+ };
+ u32 rsvd_0;
+ };
+ union {
+ struct {
+ u8 addr_stride;
+ u8 addr_cnt;
+ u16 data_size;
+ };
+ u32 rsvd_1;
+ };
+ union {
+ struct {
+ u32 op_count;
+ };
+ u32 rsvd_2;
+ };
+ union {
+ struct {
+ u32 read_addr_reg;
+ };
+ u32 rsvd_3;
+ };
+ union {
+ struct {
+ u32 cntrl_addr_reg;
+ };
+ u32 rsvd_4;
+ };
+ union {
+ struct {
+ u8 wr_byte0;
+ u8 wr_byte1;
+ u8 poll_mask;
+ u8 poll_cnt;
+ };
+ u32 rsvd_5;
+ };
+ u32 read_addr;
+ u32 read_data_size;
+};
+
+/* Read Cache L1 and L2 Header */
+struct netxen_minidump_entry_cache {
+ struct netxen_common_entry_hdr h;
+ u32 tag_reg_addr;
+ union {
+ struct {
+ u16 tag_value_stride;
+ u16 init_tag_value;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 control_addr;
+ union {
+ struct {
+ u16 write_value;
+ u8 poll_mask;
+ u8 poll_wait;
+ };
+ u32 control_value;
+ };
+ u32 read_addr;
+ union {
+ struct {
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u16 rsvd_1;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+/* Read OCM Header */
+struct netxen_minidump_entry_rdocm {
+ struct netxen_common_entry_hdr h;
+ u32 rsvd_0;
+ union {
+ struct {
+ u32 rsvd_1;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 rsvd_2;
+ u32 rsvd_3;
+ u32 read_addr;
+ union {
+ struct {
+ u32 read_addr_stride;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+/* Read MUX Header */
+struct netxen_minidump_entry_mux {
+ struct netxen_common_entry_hdr h;
+ u32 select_addr;
+ union {
+ struct {
+ u32 rsvd_0;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 select_value;
+ u32 select_value_stride;
+ u32 read_addr;
+ u32 rsvd_1;
+};
+
+/* Read Queue Header */
+struct netxen_minidump_entry_queue {
+ struct netxen_common_entry_hdr h;
+ u32 select_addr;
+ union {
+ struct {
+ u16 queue_id_stride;
+ u16 rsvd_0;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 rsvd_1;
+ u32 rsvd_2;
+ u32 read_addr;
+ union {
+ struct {
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u16 rsvd_3;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+struct netxen_dummy_dma {
+ void *addr;
+ dma_addr_t phys_addr;
+};
+
+struct netxen_adapter {
+ struct netxen_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+ struct list_head ip_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 __pad;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 link_changed;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct netxen_adapter_stats stats;
+
+ struct netxen_recv_context recv_ctx;
+ struct nx_host_tx_ring *tx_ring;
+
+ int (*macaddr_set) (struct netxen_adapter *, u8 *);
+ int (*set_mtu) (struct netxen_adapter *, int);
+ int (*set_promisc) (struct netxen_adapter *, u32);
+ void (*set_multi) (struct net_device *);
+ int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *);
+ int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val);
+ int (*init_port) (struct netxen_adapter *, int);
+ int (*stop_port) (struct netxen_adapter *);
+
+ u32 (*crb_read)(struct netxen_adapter *, ulong);
+ int (*crb_write)(struct netxen_adapter *, ulong, u32);
+
+ int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
+ int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
+
+ int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
+
+ u32 (*io_read)(struct netxen_adapter *, void __iomem *);
+ void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *pci_int_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct netxen_dummy_dma dummy_dma;
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ nx_nic_intr_coalesce_t coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /*File fw product offset*/
+ u32 fw_version;
+ const struct firmware *fw;
+ struct netxen_minidump mdump; /* mdump ptr */
+ int fw_mdump_rdy; /* for mdump ready */
+};
+
+int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
+int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val);
+
+#define NXRD32(adapter, off) \
+ (adapter->crb_read(adapter, off))
+#define NXWR32(adapter, off, val) \
+ (adapter->crb_write(adapter, off, val))
+#define NXRDIO(adapter, addr) \
+ (adapter->io_read(adapter, addr))
+#define NXWRIO(adapter, addr, val) \
+ (adapter->io_write(adapter, addr, val))
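
All CRB and I/O register traffic funnels through the per-adapter function pointers (crb_read/crb_write, io_read/io_write), which netxen_setup_hwops() points at chip-specific implementations, so call sites stay the same across P2 and P3 parts. A hypothetical read-modify-write through the wrappers (editorial sketch, not part of the patch; the CRB offset is made up):

u32 val = NXRD32(adapter, 0x0013c000);	/* hypothetical CRB offset */
NXWR32(adapter, 0x0013c000, val | 0x1);
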
+
+int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32);
+void netxen_pcie_sem_unlock(struct netxen_adapter *, int);
+
+#define netxen_rom_lock(a) \
+ netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID)
+#define netxen_rom_unlock(a) \
+ netxen_pcie_sem_unlock((a), 2)
+#define netxen_phy_lock(a) \
+ netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID)
+#define netxen_phy_unlock(a) \
+ netxen_pcie_sem_unlock((a), 3)
+#define netxen_api_lock(a) \
+ netxen_pcie_sem_lock((a), 5, 0)
+#define netxen_api_unlock(a) \
+ netxen_pcie_sem_unlock((a), 5)
+#define netxen_sw_lock(a) \
+ netxen_pcie_sem_lock((a), 6, 0)
+#define netxen_sw_unlock(a) \
+ netxen_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ netxen_pcie_sem_unlock((a), 7)
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter);
+int netxen_nic_wol_supported(struct netxen_adapter *adapter);
+
+/* Functions from netxen_nic_init.c */
+int netxen_init_dummy_dma(struct netxen_adapter *adapter);
+void netxen_free_dummy_dma(struct netxen_adapter *adapter);
+
+int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter);
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+int netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_need_fw_reset(struct netxen_adapter *adapter);
+void netxen_request_firmware(struct netxen_adapter *adapter);
+void netxen_release_firmware(struct netxen_adapter *adapter);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter);
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int netxen_flash_unlock(struct netxen_adapter *adapter);
+int netxen_backup_crbinit(struct netxen_adapter *adapter);
+int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
+int netxen_flash_erase_primary(struct netxen_adapter *adapter);
+void netxen_halt_pegs(struct netxen_adapter *adapter);
+
+int netxen_rom_se(struct netxen_adapter *adapter, int addr);
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter);
+void netxen_free_sw_resources(struct netxen_adapter *adapter);
+
+void netxen_setup_hwops(struct netxen_adapter *adapter);
+void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32);
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter);
+void netxen_release_tx_buffers(struct netxen_adapter *adapter);
+
+int netxen_init_firmware(struct netxen_adapter *adapter);
+void netxen_nic_clear_stats(struct netxen_adapter *adapter);
+void netxen_watchdog_task(struct work_struct *work);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+ struct nx_host_rds_ring *rds_ring);
+int netxen_process_cmd_ring(struct netxen_adapter *adapter);
+int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max);
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
+int netxen_config_rss(struct netxen_adapter *adapter, int enable);
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd);
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
+
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg);
+int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
+int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
+int netxen_setup_minidump(struct netxen_adapter *adapter);
+void netxen_dump_fw(struct netxen_adapter *adapter);
+void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring);
+
+/* Functions from netxen_nic_main.c */
+int netxen_nic_reset_context(struct netxen_adapter *);
+
+int nx_dev_request_reset(struct netxen_adapter *adapter);
+
+/*
+ * NetXen Board information
+ */
+
+#define NETXEN_MAX_SHORT_NAME 32
+struct netxen_brdinfo {
+	int brdtype; /* type of board */
+	long ports; /* max number of physical ports */
+ char short_name[NETXEN_MAX_SHORT_NAME];
+};
+
+struct netxen_dimm_cfg {
+ u8 presence;
+ u8 mem_type;
+ u8 dimm_type;
+ u32 size;
+};
+
+static const struct netxen_brdinfo netxen_boards[] = {
+ {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
+ {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
+ {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+	{NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig"},
+ {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"},
+ {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"},
+ {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
+ {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
+ {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
+ {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
+ {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
+ {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
+
+static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name)
+{
+ int i, found = 0;
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (netxen_boards[i].brdtype == type) {
+ strcpy(name, netxen_boards[i].short_name);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ strcpy(name, "Unknown");
+ return -EINVAL;
+ }
+
+ return 0;
+}
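The lookup above copies with strcpy(), so the caller owns a buffer of at least NETXEN_MAX_SHORT_NAME bytes. An illustrative call (fragment only):

	char brd_name[NETXEN_MAX_SHORT_NAME];

	if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
					    brd_name))
		dev_warn(&pdev->dev, "unknown board type 0x%x\n",
			 adapter->ahw.board_type);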
+
+static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ return find_diff_among(tx_ring->producer,
+ tx_ring->sw_consumer, tx_ring->num_desc);
+}
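netxen_tx_avail() leans on find_diff_among() for circular-ring arithmetic: the free-slot count is the distance from producer to consumer, wrapping at num_desc. A standalone model of that arithmetic, assuming both indices are already wrapped below the ring size (when they are equal the whole ring is considered free):

/* Userspace model of the ring math behind netxen_tx_avail(). */
#include <stdio.h>

static unsigned int ring_avail(unsigned int producer, unsigned int consumer,
			       unsigned int num_desc)
{
	if (producer < consumer)
		return consumer - producer;
	return num_desc - producer + consumer;	/* wrapped case */
}

int main(void)
{
	/* 1024-entry ring, producer 1000, consumer 10 -> 34 free slots. */
	printf("%u\n", ring_avail(1000, 10, 1024));
	return 0;
}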
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+
+extern const struct ethtool_ops netxen_nic_ethtool_ops;
+
+#endif /* __NETXEN_NIC_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
new file mode 100644
index 000000000..b8d527035
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+
+#define NXHAL_VERSION 1
+
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+ u32 rsp = NX_CDRP_RSP_OK;
+ int timeout = 0;
+
+ do {
+		/* give the firmware at least 1 ms to respond */
+ msleep(1);
+
+ if (++timeout > NX_OS_CRB_RETRY_COUNT)
+ return NX_CDRP_RSP_TIMEOUT;
+
+ rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
+ } while (!NX_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
+{
+ u32 rsp;
+ u32 signature = 0;
+ u32 rcode = NX_RCODE_SUCCESS;
+
+ signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
+ NXHAL_VERSION);
+ /* Acquire semaphore before accessing CRB */
+ if (netxen_api_lock(adapter))
+ return NX_RCODE_TIMEOUT;
+
+ NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
+
+ NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
+
+ NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
+
+ NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
+
+ NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
+
+ rsp = netxen_poll_rsp(adapter);
+
+ if (rsp == NX_CDRP_RSP_TIMEOUT) {
+ printk(KERN_ERR "%s: card response timeout.\n",
+ netxen_nic_driver_name);
+
+ rcode = NX_RCODE_TIMEOUT;
+ } else if (rsp == NX_CDRP_RSP_FAIL) {
+ rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+
+ printk(KERN_ERR "%s: failed card response code:0x%x\n",
+ netxen_nic_driver_name, rcode);
+ } else if (rsp == NX_CDRP_RSP_OK) {
+ cmd->rsp.cmd = NX_RCODE_SUCCESS;
+ if (cmd->rsp.arg2)
+ cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
+ if (cmd->rsp.arg3)
+ cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
+ }
+
+ if (cmd->rsp.arg1)
+ cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+ /* Release semaphore */
+ netxen_api_unlock(adapter);
+
+ return rcode;
+}
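netxen_issue_cmd() has a read-back convention that is easy to miss: the caller marks cmd.rsp.argN non-zero beforehand to request that the firmware's argN be copied back after completion (arg1 is fetched even on failure). A hedged fragment showing a typical call, modeled on the template-size query below; template_size is a hypothetical local:

	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
	cmd.rsp.arg2 = 1;	/* non-zero: fetch arg2 back on completion */
	if (netxen_issue_cmd(adapter, &cmd) != NX_RCODE_SUCCESS)
		return -EIO;
	template_size = cmd.rsp.arg2;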
+
+static int
+netxen_get_minidump_template_size(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_args cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ netxen_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Can't get template size %d\n", cmd.rsp.cmd);
+ return -EIO;
+ }
+ adapter->mdump.md_template_size = cmd.rsp.arg2;
+ adapter->mdump.md_template_ver = cmd.rsp.arg3;
+ return 0;
+}
+
+static int
+netxen_get_minidump_template(struct netxen_adapter *adapter)
+{
+ dma_addr_t md_template_addr;
+ void *addr;
+ u32 size;
+ struct netxen_cmd_args cmd;
+ size = adapter->mdump.md_template_size;
+
+ if (size == 0) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot capture minidump template: invalid template size.\n");
+ return NX_RCODE_INVALID_ARGS;
+ }
+
+ addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
+ if (!addr) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
+ return -ENOMEM;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
+ cmd.req.arg1 = LSD(md_template_addr);
+ cmd.req.arg2 = MSD(md_template_addr);
+ cmd.req.arg3 |= size;
+ netxen_issue_cmd(adapter, &cmd);
+
+ if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
+ memcpy(adapter->mdump.md_template, addr, size);
+ } else {
+		dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
+			"err_code: %d, requested_size: %d, actual_size: %d\n",
+			cmd.rsp.cmd, size, cmd.rsp.arg2);
+ }
+ pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+ return 0;
+}
+
+static u32
+netxen_check_template_checksum(struct netxen_adapter *adapter)
+{
+	u64 sum = 0;
+	u32 *buff = adapter->mdump.md_template;
+	int count = adapter->mdump.md_template_size / sizeof(uint32_t);
+
+	while (count-- > 0)
+		sum += *buff++;
+	while (sum >> 32)
+		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+
+ return ~sum;
+}
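The checksum above is end-around-carry arithmetic: sum the template as 32-bit words into 64 bits, repeatedly fold the carries back in, and return the complement, so a template carrying a correctly generated checksum word folds to 0xffffffff and the function returns 0. A standalone model (toy data, not a real template):

/* Userspace model of the minidump template checksum. */
#include <stdint.h>
#include <stdio.h>

static uint32_t template_checksum(const uint32_t *buf, size_t words)
{
	uint64_t sum = 0;

	while (words-- > 0)
		sum += *buf++;
	while (sum >> 32)
		sum = (sum & 0xffffffff) + (sum >> 32);
	return (uint32_t)~sum;
}

int main(void)
{
	uint32_t tmpl[4] = { 0x12345678, 0x9abcdef0, 0x11111111, 0 };

	/* Patch the last word with the checksum of the rest: the full
	 * buffer then folds to 0xffffffff and the check returns 0. */
	tmpl[3] = template_checksum(tmpl, 3);
	printf("valid check = 0x%x\n", template_checksum(tmpl, 4));
	return 0;
}

Patching the last word with the checksum of the preceding words makes the full-buffer check print 0, which is exactly the condition netxen_setup_minidump() treats as a valid template.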
+
+int
+netxen_setup_minidump(struct netxen_adapter *adapter)
+{
+ int err = 0, i;
+ u32 *template, *tmp_buf;
+ struct netxen_minidump_template_hdr *hdr;
+ err = netxen_get_minidump_template_size(adapter);
+ if (err) {
+ adapter->mdump.fw_supports_md = 0;
+ if ((err == NX_RCODE_CMD_INVALID) ||
+ (err == NX_RCODE_CMD_NOT_IMPL)) {
+			dev_info(&adapter->pdev->dev,
+				"Flashed firmware version does not support minidump, "
+				"minimum version required is %u.%u.%u\n",
+				NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
+				NX_MD_SUPPORT_SUBVERSION);
+ }
+ return err;
+ }
+
+ if (!adapter->mdump.md_template_size) {
+		dev_err(&adapter->pdev->dev,
+			"Invalid template size, should be non-zero.\n");
+ return -EIO;
+ }
+ adapter->mdump.md_template =
+ kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
+
+ if (!adapter->mdump.md_template)
+ return -ENOMEM;
+
+ err = netxen_get_minidump_template(adapter);
+ if (err) {
+ if (err == NX_RCODE_CMD_NOT_IMPL)
+ adapter->mdump.fw_supports_md = 0;
+ goto free_template;
+ }
+
+ if (netxen_check_template_checksum(adapter)) {
+ dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
+ err = -EIO;
+ goto free_template;
+ }
+
+ adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
+ tmp_buf = (u32 *) adapter->mdump.md_template;
+ template = (u32 *) adapter->mdump.md_template;
+ for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
+ *template++ = __le32_to_cpu(*tmp_buf++);
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ adapter->mdump.md_capture_buff = NULL;
+ adapter->mdump.fw_supports_md = 1;
+ adapter->mdump.md_enabled = 0;
+
+ return err;
+
+free_template:
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+ return err;
+}
+
+int
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
+{
+ u32 rcode = NX_RCODE_SUCCESS;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = mtu;
+ cmd.req.arg3 = 0;
+
+	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+		rcode = netxen_issue_cmd(adapter, &cmd);
+
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg)
+{
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
+ cmd.req.arg1 = speed;
+ cmd.req.arg2 = duplex;
+ cmd.req.arg3 = autoneg;
+ return netxen_issue_cmd(adapter, &cmd);
+}
+
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+ void *addr;
+ nx_hostrq_rx_ctx_t *prq;
+ nx_cardrsp_rx_ctx_t *prsp;
+ nx_hostrq_rds_ring_t *prq_rds;
+ nx_hostrq_sds_ring_t *prq_sds;
+ nx_cardrsp_rds_ring_t *prsp_rds;
+ nx_cardrsp_sds_ring_t *prsp_sds;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_cmd_args cmd;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+
+ int err;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+ cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+ if (adapter->flags & NETXEN_FW_MSS_CAP)
+ cap |= NX_CAP0_HW_LRO_MSS;
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
+ err = netxen_issue_cmd(adapter, &cmd);
+ if (err) {
+ printk(KERN_WARNING
+			"Failed to create rx ctx in firmware: %d\n", err);
+ goto out_free_rsp;
+ }
+
+ prsp_rds = ((nx_cardrsp_rds_ring_t *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((nx_cardrsp_sds_ring_t *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
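The request built above is one DMA buffer: a fixed header whose data[] tail carries nrds_rings RDS descriptors followed by nsds_rings SDS descriptors, with rds_ring_offset/sds_ring_offset locating them relative to data[]. A standalone model of the layout math; the descriptor sizes here are assumptions for illustration, not the real structure sizes:

/* Userspace model of the host-request tail layout. */
#include <stdio.h>

#define RDS_DESC_SZ	24	/* assumed sizeof(nx_hostrq_rds_ring_t) */
#define SDS_DESC_SZ	16	/* assumed sizeof(nx_hostrq_sds_ring_t) */

int main(void)
{
	unsigned int nrds = 3, nsds = 4;
	unsigned int rds_off = 0;	/* RDS array starts the tail */
	unsigned int sds_off = rds_off + RDS_DESC_SZ * nrds;

	printf("rds_ring_offset=%u sds_ring_offset=%u tail_bytes=%u\n",
	       rds_off, sds_off, sds_off + SDS_DESC_SZ * nsds);
	return 0;
}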
+
+static void
+nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
+
+ if (netxen_issue_cmd(adapter, &cmd)) {
+ printk(KERN_WARNING
+ "%s: Failed to destroy rx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
+static int
+nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
+{
+ nx_hostrq_tx_ctx_t *prq;
+ nx_hostrq_cds_ring_t *prq_cds;
+ nx_cardrsp_tx_ctx_t *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err = 0;
+ u64 offset, phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+
+ prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+ offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
+ prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
+ err = netxen_issue_cmd(adapter, &cmd);
+
+ if (err == NX_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(temp - 0x200));
+#if 0
+ adapter->tx_state =
+ le32_to_cpu(prsp->host_ctx_state);
+#endif
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ printk(KERN_WARNING
+			"Failed to create tx ctx in firmware: %d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->tx_context_id;
+ cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
+ if (netxen_issue_cmd(adapter, &cmd)) {
+ printk(KERN_WARNING
+ "%s: Failed to destroy tx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
+int
+nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
+{
+ u32 rcode;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = reg;
+ cmd.req.arg2 = 0;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
+ cmd.rsp.arg1 = 1;
+ rcode = netxen_issue_cmd(adapter, &cmd);
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ if (val == NULL)
+ return -EIO;
+
+ *val = cmd.rsp.arg1;
+ return 0;
+}
+
+int
+nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
+{
+ u32 rcode;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = reg;
+ cmd.req.arg2 = val;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
+ rcode = netxen_issue_cmd(adapter, &cmd);
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+static u64 ctx_addr_sig_regs[][3] = {
+ {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+ {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+ {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+ {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0])
+#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
+#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
+
+#define lower32(x) ((u32)((x) & 0xffffffff))
+#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
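lower32()/upper32() split a 64-bit DMA address across the two 32-bit CRB address registers written in netxen_init_old_ctx() below. A standalone check of the split:

/* Userspace check of the 64-bit address split. */
#include <stdint.h>
#include <stdio.h>

#define lower32(x) ((uint32_t)((x) & 0xffffffff))
#define upper32(x) ((uint32_t)(((uint64_t)(x) >> 32) & 0xffffffff))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;

	/* Prints hi=0x12 lo=0x34abcd00. */
	printf("hi=0x%x lo=0x%x\n", upper32(dma), lower32(dma));
	return 0;
}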
+
+static struct netxen_recv_crb recv_crb_registers[] = {
+ /* Instance 0 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x100),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x110),
+ /* LRO */
+ NETXEN_NIC_REG(0x120)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x138),
+ NETXEN_NIC_REG_2(0x000),
+ NETXEN_NIC_REG_2(0x004),
+ NETXEN_NIC_REG_2(0x008),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_0,
+ NETXEN_NIC_REG_2(0x044),
+ NETXEN_NIC_REG_2(0x048),
+ NETXEN_NIC_REG_2(0x04c),
+ },
+ },
+ /* Instance 1 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x144),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x154),
+ /* LRO */
+ NETXEN_NIC_REG(0x164)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x17c),
+ NETXEN_NIC_REG_2(0x020),
+ NETXEN_NIC_REG_2(0x024),
+ NETXEN_NIC_REG_2(0x028),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_1,
+ NETXEN_NIC_REG_2(0x064),
+ NETXEN_NIC_REG_2(0x068),
+ NETXEN_NIC_REG_2(0x06c),
+ },
+ },
+ /* Instance 2 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x1d8),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x1f8),
+ /* LRO */
+ NETXEN_NIC_REG(0x208)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x220),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_2,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ },
+ /* Instance 3 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x22c),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x23c),
+ /* LRO */
+ NETXEN_NIC_REG(0x24c)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x264),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_3,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ },
+};
+
+static int
+netxen_init_old_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+ int port = adapter->portnum;
+ struct netxen_ring_ctx *hwctx;
+ u32 signature;
+
+ tx_ring = adapter->tx_ring;
+ recv_ctx = &adapter->recv_ctx;
+ hwctx = recv_ctx->hwctx;
+
+ hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
+ hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ hwctx->rcv_rings[ring].addr =
+ cpu_to_le64(rds_ring->phys_addr);
+ hwctx->rcv_rings[ring].size =
+ cpu_to_le32(rds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (ring == 0) {
+ hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
+ }
+ hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
+ hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
+ }
+ hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
+
+ signature = (adapter->max_sds_rings > 1) ?
+ NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
+
+ NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
+ lower32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
+ upper32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ signature | port);
+ return 0;
+}
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
+{
+ void *addr;
+ int err = 0;
+ int ring;
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ int port = adapter->portnum;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ addr = pci_alloc_consistent(pdev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate hw context\n");
+ return -ENOMEM;
+ }
+
+ memset(addr, 0, sizeof(struct netxen_ring_ctx));
+ recv_ctx->hwctx = addr;
+ recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
+ recv_ctx->hwctx->cmd_consumer_offset =
+ cpu_to_le64(recv_ctx->phys_addr +
+ sizeof(struct netxen_ring_ctx));
+ tx_ring->hw_consumer =
+ (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
+ netdev->name);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate rds ring [%d]\n",
+ netdev->name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = addr;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ rds_ring->crb_rcv_producer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_rcv_producer[ring]);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate sds ring [%d]\n",
+ netdev->name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = addr;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ sds_ring->crb_sts_consumer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_sts_consumer[ring]);
+
+ sds_ring->crb_intr_mask =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].sw_int_mask[ring]);
+ }
+ }
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
+ goto done;
+ err = nx_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = nx_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ } else {
+ err = netxen_init_old_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ }
+
+done:
+ return 0;
+
+err_out_free:
+ netxen_free_hw_resources(adapter);
+ return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+
+ int port = adapter->portnum;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
+ goto done;
+
+ nx_fw_cmd_destroy_rx_ctx(adapter);
+ nx_fw_cmd_destroy_tx_ctx(adapter);
+ } else {
+ netxen_api_lock(adapter);
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ NETXEN_CTX_D3_RESET | port);
+ netxen_api_unlock(adapter);
+ }
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+
+done:
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->hwctx != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(struct netxen_ring_ctx) +
+ sizeof(uint32_t),
+ recv_ctx->hwctx,
+ recv_ctx->phys_addr);
+ recv_ctx->hwctx = NULL;
+ }
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
new file mode 100644
index 000000000..87e073c6e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -0,0 +1,959 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct netxen_nic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \
+ offsetof(struct netxen_adapter, m)
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+ {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+ {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+ {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
+ {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+ {"csummed", NETXEN_NIC_STAT(stats.csummed)},
+ {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)},
+ {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)},
+ {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+ {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats)
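NETXEN_NIC_STAT(m) expands to a sizeof/offsetof pair, which is why one macro initializes the two size/offset members of each table entry; netxen_nic_get_ethtool_stats() later uses exactly those two values to pull each counter out of the adapter. One entry expanded by hand (illustrative):

	/* {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)} is roughly: */
	{ "rx_bytes",
	  sizeof(((struct netxen_adapter *)0)->stats.rxbytes),
	  offsetof(struct netxen_adapter, stats.rxbytes) },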
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
+
+#define NETXEN_NIC_REGS_COUNT 30
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN 1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+ return NETXEN_FLASH_TOTAL_SIZE;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 fw_major = 0;
+ u32 fw_minor = 0;
+ u32 fw_build = 0;
+
+ strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
+ drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
+}
+
+static int
+netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->port = PORT_TP;
+
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ u32 val;
+
+ val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+ if (val == NETXEN_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ ecmd->port = PORT_TP;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ u16 pcifn = adapter->ahw.pci_func;
+
+ val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val));
+ } else
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ case NETXEN_BRDTYPE_P2_SB31_2G:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
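+		/* fall through */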
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = (adapter->ahw.board_type ==
+ NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
+ (AUTONEG_DISABLE) : (adapter->link_autoneg);
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
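+		/* fall through */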
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = -1;
+ }
+ }
+
+ if (!netif_running(dev) || !adapter->ahw.linkup) {
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ }
+
+ return 0;
+}
+
+static int
+netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(ecmd);
+ int ret;
+
+ if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+ return -EOPNOTSUPP;
+
+ ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
+ ecmd->autoneg);
+ if (ret == NX_RCODE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+
+ adapter->link_speed = speed;
+ adapter->link_duplex = ecmd->duplex;
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static int netxen_nic_get_regs_len(struct net_device *dev)
+{
+ return NETXEN_NIC_REGS_LEN;
+}
+
+static void
+netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
+
+ memset(p, 0, NETXEN_NIC_REGS_LEN);
+
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
+ }
+}
+
+static u32 netxen_nic_test_link(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 val, port;
+
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
+ return (val == XG_LINK_UP) ? 0 : 1;
+ }
+}
+
+static int
+netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = netxen_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+netxen_nic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+}
+
+static u32
+netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ netxen_nic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
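netxen_validate_ringparam() clamps the request into [min, max] and then rounds up to a power of two; since all ring maxima in this driver are powers of two, the rounded result stays in range. A standalone model:

/* Userspace model of the clamp-and-round in netxen_validate_ringparam(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static uint32_t validate(uint32_t val, uint32_t lo, uint32_t hi)
{
	uint32_t n = val < lo ? lo : val;

	n = n > hi ? hi : n;
	return roundup_pow_of_two(n);
}

int main(void)
{
	/* 3000 requested, clamped to the 1024 (10G jumbo) maximum. */
	printf("%u\n", validate(3000, 64, 1024));
	/* 700 requested: within range, rounded up to 1024. */
	printf("%u\n", validate(700, 64, 1024));
	return 0;
}

Note that the rounding happens after the clamp, so a non-power-of-two maximum could in principle be exceeded; the driver sidesteps this by only ever passing power-of-two limits.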
+
+static int
+netxen_nic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EOPNOTSUPP;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+		max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ num_rxd = netxen_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = netxen_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return netxen_nic_reset_context(adapter);
+}
+
+static void
+netxen_nic_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ __u32 val;
+ int port = adapter->physical_port;
+
+ pause->autoneg = 0;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = netxen_gb_get_rx_flowctl(val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(netxen_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(netxen_xg_get_xg1_mask(val));
+ } else {
+		printk(KERN_ERR "%s: Unknown port type: %x\n",
+ netxen_nic_driver_name, adapter->ahw.port_type);
+ }
+}
+
+static int
+netxen_nic_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ __u32 val;
+ int port = adapter->physical_port;
+
+ /* not supported */
+ if (pause->autoneg)
+ return -EINVAL;
+
+ /* read mode */
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ netxen_gb_rx_flowctl(val);
+ else
+ netxen_gb_unset_rx_flowctl(val);
+
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set autoneg */
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb0_mask(val);
+ else
+ netxen_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb1_mask(val);
+ else
+ netxen_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb2_mask(val);
+ else
+ netxen_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb3_mask(val);
+ else
+ netxen_gb_set_gb3_mask(val);
+ break;
+ }
+ NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ netxen_xg_unset_xg0_mask(val);
+ else
+ netxen_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ netxen_xg_unset_xg1_mask(val);
+ else
+ netxen_xg_set_xg1_mask(val);
+ }
+ NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
+ } else {
+		printk(KERN_ERR "%s: Unknown port type: %x\n",
+ netxen_nic_driver_name,
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int netxen_nic_reg_test(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int netxen_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return NETXEN_NIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return NETXEN_NIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
+ if ((data[0] = netxen_nic_reg_test(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* link test */
+ if ((data[1] = (u64) netxen_nic_test_link(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *netxen_nic_gstrings_test,
+ NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ netxen_nic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+netxen_nic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ netxen_nic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (netxen_nic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
+ }
+}
+
+static void
+netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg = 0;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+ NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the interrupt coalescing parameters. Only the "normal" mode is
+ * currently supported. If rx_coalesce_usecs == 0 or
+ * rx_max_coalesced_frames == 0, the firmware coalescing parameters are
+ * reset to their defaults.
+ */
+static int netxen_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ netxen_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int netxen_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
+static int
+netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+ if (adapter->fw_mdump_rdy)
+ dump->len = mdump->md_dump_size;
+ else
+ dump->len = 0;
+
+ if (!mdump->md_enabled)
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = mdump->md_capture_mask;
+
+ dump->version = adapter->fw_version;
+ return 0;
+}
+
+static int
+netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ int i;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+
+ switch (val->flag) {
+ case NX_FORCE_FW_DUMP_KEY:
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return 0;
+ }
+ if (adapter->fw_mdump_rdy) {
+ netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
+ return 0;
+ }
+ netdev_info(netdev, "Forcing a fw dump\n");
+ nx_dev_request_reset(adapter);
+ break;
+ case NX_DISABLE_FW_DUMP:
+ if (mdump->md_enabled) {
+ netdev_info(netdev, "Disabling FW Dump\n");
+ mdump->md_enabled = 0;
+ }
+ break;
+ case NX_ENABLE_FW_DUMP:
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "Enabling FW dump\n");
+ mdump->md_enabled = 1;
+ }
+ break;
+ case NX_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing FW reset\n");
+ nx_dev_request_reset(adapter);
+ adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+ break;
+ default:
+ for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
+ if (val->flag == FW_DUMP_LEVELS[i]) {
+ mdump->md_capture_mask = val->flag;
+ netdev_info(netdev,
+ "Driver mask changed to: 0x%x\n",
+ mdump->md_capture_mask);
+ return 0;
+ }
+ }
+ netdev_info(netdev,
+ "Invalid dump level: 0x%x\n", val->flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ int i, copy_sz;
+ u32 *hdr_ptr, *data;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+
+ if (!adapter->fw_mdump_rdy) {
+ netdev_info(netdev, "Dump not available\n");
+ return -EINVAL;
+ }
+ /* Copy template header first */
+ copy_sz = mdump->md_template_size;
+ hdr_ptr = (u32 *) mdump->md_template;
+ data = buffer;
+ for (i = 0; i < copy_sz/sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz,
+ mdump->md_capture_buff + mdump->md_template_size,
+ mdump->md_capture_size);
+ dump->len = copy_sz + mdump->md_capture_size;
+ dump->flag = mdump->md_capture_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(mdump->md_capture_buff);
+ mdump->md_capture_buff = NULL;
+ adapter->fw_mdump_rdy = 0;
+	netdev_info(netdev, "Firmware dump extracted successfully\n");
+ return 0;
+}
+
+const struct ethtool_ops netxen_nic_ethtool_ops = {
+ .get_settings = netxen_nic_get_settings,
+ .set_settings = netxen_nic_set_settings,
+ .get_drvinfo = netxen_nic_get_drvinfo,
+ .get_regs_len = netxen_nic_get_regs_len,
+ .get_regs = netxen_nic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = netxen_nic_get_eeprom_len,
+ .get_eeprom = netxen_nic_get_eeprom,
+ .get_ringparam = netxen_nic_get_ringparam,
+ .set_ringparam = netxen_nic_set_ringparam,
+ .get_pauseparam = netxen_nic_get_pauseparam,
+ .set_pauseparam = netxen_nic_set_pauseparam,
+ .get_wol = netxen_nic_get_wol,
+ .set_wol = netxen_nic_set_wol,
+ .self_test = netxen_nic_diag_test,
+ .get_strings = netxen_nic_get_strings,
+ .get_ethtool_stats = netxen_nic_get_ethtool_stats,
+ .get_sset_count = netxen_get_sset_count,
+ .get_coalesce = netxen_get_intr_coalesce,
+ .set_coalesce = netxen_set_intr_coalesce,
+ .get_dump_flag = netxen_get_dump_flag,
+ .get_dump_data = netxen_get_dump_data,
+ .set_dump = netxen_set_dump,
+};
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
new file mode 100644
index 000000000..a310c2f65
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __NETXEN_NIC_HDR_H_
+#define __NETXEN_NIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 netxen_crbword_t; /* single word in CRB space */
+
+enum {
+ NETXEN_HW_H0_CH_HUB_ADR = 0x05,
+ NETXEN_HW_H1_CH_HUB_ADR = 0x0E,
+ NETXEN_HW_H2_CH_HUB_ADR = 0x03,
+ NETXEN_HW_H3_CH_HUB_ADR = 0x01,
+ NETXEN_HW_H4_CH_HUB_ADR = 0x06,
+ NETXEN_HW_H5_CH_HUB_ADR = 0x07,
+ NETXEN_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ NETXEN_HW_MN_CRB_AGT_ADR = 0x15,
+ NETXEN_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ NETXEN_HW_PS_CRB_AGT_ADR = 0x73,
+ NETXEN_HW_SS_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ NETXEN_HW_QMS_CRB_AGT_ADR = 0x00,
+ NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58,
+ NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59,
+ NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ NETXEN_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ NETXEN_HW_NIU_CRB_AGT_ADR = 0x31,
+ NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19,
+ NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ NETXEN_HW_SN_CRB_AGT_ADR = 0x10,
+ NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_LPC_CRB_AGT_ADR = 0x22,
+ NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ NETXEN_HW_QM_CRB_AGT_ADR = 0x66,
+ NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60,
+ NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61,
+ NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62,
+ NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63,
+ NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ NETXEN_HW_PH_CRB_AGT_ADR = 0x1A,
+ NETXEN_HW_SRE_CRB_AGT_ADR = 0x50,
+ NETXEN_HW_EG_CRB_AGT_ADR = 0x51,
+ NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGN1_CRB_AGT_ADR,
+ NETXEN_HW_PEGN2_CRB_AGT_ADR,
+ NETXEN_HW_PEGN3_CRB_AGT_ADR,
+ NETXEN_HW_PEGNI_CRB_AGT_ADR,
+ NETXEN_HW_PEGND_CRB_AGT_ADR,
+ NETXEN_HW_PEGNC_CRB_AGT_ADR,
+ NETXEN_HW_PEGR0_CRB_AGT_ADR,
+ NETXEN_HW_PEGR1_CRB_AGT_ADR,
+ NETXEN_HW_PEGR2_CRB_AGT_ADR,
+ NETXEN_HW_PEGR3_CRB_AGT_ADR,
+ NETXEN_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGS1_CRB_AGT_ADR,
+ NETXEN_HW_PEGS2_CRB_AGT_ADR,
+ NETXEN_HW_PEGS3_CRB_AGT_ADR,
+ NETXEN_HW_PEGSI_CRB_AGT_ADR,
+ NETXEN_HW_PEGSD_CRB_AGT_ADR,
+ NETXEN_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+ NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47,
+ NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48,
+ NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49,
+ NETXEN_HW_NCM_CRB_AGT_ADR = 0x16,
+ NETXEN_HW_TMR_CRB_AGT_ADR = 0x17,
+ NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05,
+ NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06,
+ NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ NETXEN_HW_PX_MAP_CRB_PH = 0,
+ NETXEN_HW_PX_MAP_CRB_PS,
+ NETXEN_HW_PX_MAP_CRB_MN,
+ NETXEN_HW_PX_MAP_CRB_MS,
+ NETXEN_HW_PX_MAP_CRB_PGR1,
+ NETXEN_HW_PX_MAP_CRB_SRE,
+ NETXEN_HW_PX_MAP_CRB_NIU,
+ NETXEN_HW_PX_MAP_CRB_QMN,
+ NETXEN_HW_PX_MAP_CRB_SQN0,
+ NETXEN_HW_PX_MAP_CRB_SQN1,
+ NETXEN_HW_PX_MAP_CRB_SQN2,
+ NETXEN_HW_PX_MAP_CRB_SQN3,
+ NETXEN_HW_PX_MAP_CRB_QMS,
+ NETXEN_HW_PX_MAP_CRB_SQS0,
+ NETXEN_HW_PX_MAP_CRB_SQS1,
+ NETXEN_HW_PX_MAP_CRB_SQS2,
+ NETXEN_HW_PX_MAP_CRB_SQS3,
+ NETXEN_HW_PX_MAP_CRB_PGN0,
+ NETXEN_HW_PX_MAP_CRB_PGN1,
+ NETXEN_HW_PX_MAP_CRB_PGN2,
+ NETXEN_HW_PX_MAP_CRB_PGN3,
+ NETXEN_HW_PX_MAP_CRB_PGND,
+ NETXEN_HW_PX_MAP_CRB_PGNI,
+ NETXEN_HW_PX_MAP_CRB_PGS0,
+ NETXEN_HW_PX_MAP_CRB_PGS1,
+ NETXEN_HW_PX_MAP_CRB_PGS2,
+ NETXEN_HW_PX_MAP_CRB_PGS3,
+ NETXEN_HW_PX_MAP_CRB_PGSD,
+ NETXEN_HW_PX_MAP_CRB_PGSI,
+ NETXEN_HW_PX_MAP_CRB_SN,
+ NETXEN_HW_PX_MAP_CRB_PGR2,
+ NETXEN_HW_PX_MAP_CRB_EG,
+ NETXEN_HW_PX_MAP_CRB_PH2,
+ NETXEN_HW_PX_MAP_CRB_PS2,
+ NETXEN_HW_PX_MAP_CRB_CAM,
+ NETXEN_HW_PX_MAP_CRB_CAS0,
+ NETXEN_HW_PX_MAP_CRB_CAS1,
+ NETXEN_HW_PX_MAP_CRB_CAS2,
+ NETXEN_HW_PX_MAP_CRB_C2C0,
+ NETXEN_HW_PX_MAP_CRB_C2C1,
+ NETXEN_HW_PX_MAP_CRB_TIMR,
+ NETXEN_HW_PX_MAP_CRB_PGR3,
+ NETXEN_HW_PX_MAP_CRB_RPMX1,
+ NETXEN_HW_PX_MAP_CRB_RPMX2,
+ NETXEN_HW_PX_MAP_CRB_RPMX3,
+ NETXEN_HW_PX_MAP_CRB_RPMX4,
+ NETXEN_HW_PX_MAP_CRB_RPMX5,
+ NETXEN_HW_PX_MAP_CRB_RPMX6,
+ NETXEN_HW_PX_MAP_CRB_RPMX7,
+ NETXEN_HW_PX_MAP_CRB_XDMA,
+ NETXEN_HW_PX_MAP_CRB_I2Q,
+ NETXEN_HW_PX_MAP_CRB_ROMUSB,
+ NETXEN_HW_PX_MAP_CRB_CAS3,
+ NETXEN_HW_PX_MAP_CRB_RPMX0,
+ NETXEN_HW_PX_MAP_CRB_RPMX8,
+ NETXEN_HW_PX_MAP_CRB_RPMX9,
+ NETXEN_HW_PX_MAP_CRB_OCM0,
+ NETXEN_HW_PX_MAP_CRB_OCM1,
+ NETXEN_HW_PX_MAP_CRB_SMB,
+ NETXEN_HW_PX_MAP_CRB_I2C0,
+ NETXEN_HW_PX_MAP_CRB_I2C1,
+ NETXEN_HW_PX_MAP_CRB_LPC,
+ NETXEN_HW_PX_MAP_CRB_PGNC,
+ NETXEN_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR)
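+
+/*
+ * For example, NETXEN_HW_CRB_HUB_AGT_ADR_MN expands to
+ * (0x05 << 7) | 0x15 = 0x295: the hub number occupies bits [11:7]
+ * and the agent number bits [6:0] of the composed value.  All of the
+ * NETXEN_HW_CRB_HUB_AGT_ADR_* values are formed the same way.
+ */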
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
+
+#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c)
+#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034)
+#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014)
+#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000)
+#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000)
+#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000)
+
+#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008)
+
+#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC 0x3c
+
+#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000)
+
+#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+/* all are 1MB windows */
+
+#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000
+#define NETXEN_PCI_CRB_WINDOW(A) \
+ (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE)
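+
+/*
+ * For example, NETXEN_CRB_NIU below resolves to
+ * 0x06000000 + 6 * 0x00100000 = 0x06600000, since
+ * NETXEN_HW_PX_MAP_CRB_NIU is 6 in the PCI/X map enum above.
+ */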
+
+#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU)
+#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE)
+#define NETXEN_CRB_ROMUSB \
+ NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
+#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
+#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
+#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
+
+#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
+#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2)
+#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0)
+#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1)
+#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2)
+#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3)
+#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2)
+#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
+#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
+#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN)
+
+#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
+#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define NETXEN_PCI_MAPSIZE 128
+#define NETXEN_PCI_DDR_NET (0x00000000UL)
+#define NETXEN_PCI_QDR_NET (0x04000000UL)
+#define NETXEN_PCI_DIRECT_CRB (0x04400000UL)
+#define NETXEN_PCI_CAMQM (0x04800000UL)
+#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL)
+#define NETXEN_PCI_OCM0 (0x05000000UL)
+#define NETXEN_PCI_OCM0_MAX (0x050fffffUL)
+#define NETXEN_PCI_OCM1 (0x05100000UL)
+#define NETXEN_PCI_OCM1_MAX (0x051fffffUL)
+#define NETXEN_PCI_CRBSPACE (0x06000000UL)
+#define NETXEN_PCI_128MB_SIZE (0x08000000UL)
+#define NETXEN_PCI_32MB_SIZE (0x02000000UL)
+#define NETXEN_PCI_2MB_SIZE (0x00200000UL)
+
+#define NETXEN_PCI_MN_2M (0)
+#define NETXEN_PCI_MS_2M (0x80000)
+#define NETXEN_PCI_OCM0_2M (0x000c0000UL)
+#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
+
+#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL)
+#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL)
+#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL)
+#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define NETXEN_MIU_CONTROL (0x000)
+#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL)
+
+ /* 200ms delay in each loop */
+#define NETXEN_NIU_PHY_WAITLEN 200000
+ /* 10 seconds before we give up */
+#define NETXEN_NIU_PHY_WAITMAX 50
+#define NETXEN_NIU_MAX_GBE_PORTS 4
+#define NETXEN_NIU_MAX_XG_PORTS 2
+
+#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000)
+
+#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004)
+#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008)
+#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c)
+#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010)
+#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014)
+#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018)
+#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c)
+#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020)
+#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024)
+#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028)
+#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c)
+#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030)
+#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034)
+#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038)
+#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c)
+#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040)
+#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044)
+#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048)
+
+#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c)
+
+#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050)
+#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054)
+#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058)
+#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c)
+#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060)
+#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064)
+#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068)
+#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c)
+#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070)
+#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074)
+#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078)
+#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c)
+#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088)
+#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c)
+#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090)
+#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
+#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
+#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
+#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac)
+#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0)
+#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
+#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c)
+
+#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450)
+
+#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c)
+#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120)
+#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124)
+
+#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000)
+
+#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010)
+#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014)
+#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018)
+#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c)
+
+#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080)
+#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100)
+
+#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \
+ (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \
+ (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \
+ (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000)
+#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000)
+#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \
+ (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000)
+#define NETXEN_NIU_GB_TEST_REG(I) \
+ (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \
+ (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \
+ (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \
+ (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \
+ (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_0(I) \
+ (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_1(I) \
+ (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000)
+
+#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000)
+#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004)
+#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010)
+#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014)
+#define NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018)
+#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020)
+#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024)
+#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028)
+#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c)
+#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030)
+#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034)
+#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038)
+#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c)
+#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040)
+#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044)
+#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048)
+#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c)
+#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050)
+#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058)
+#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000)
+#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004)
+#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010)
+#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014)
+#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018)
+#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020)
+#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024)
+#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028)
+#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c)
+#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030)
+#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034)
+#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038)
+#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c)
+#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040)
+#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044)
+#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048)
+#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c)
+#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050)
+#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058)
+
+/* P3 802.3ap */
+#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000)
+#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000)
+#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000)
+#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
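+
+/*
+ * Both test agents follow the same access protocol (a sketch; the
+ * driver's memory-access helpers in netxen_nic_hw.c implement it):
+ *
+ *	write target addr to <base> + *_TEST_AGT_ADDR_LO/_HI
+ *	write TA_CTL_ENABLE, then TA_CTL_START | TA_CTL_ENABLE,
+ *	    to <base> + TEST_AGT_CTRL
+ *	poll <base> + TEST_AGT_CTRL until TA_CTL_BUSY clears
+ *	read the result from <base> + *_TEST_AGT_RDDATA_LO/_HI
+ */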
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn,val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
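+
+/*
+ * For example, for PCI function 5 the link state is
+ * XG_LINK_STATE_P3(5, val) = (val >> 20) & 0xf, and the link speed
+ * comes from CRB_PF_LINK_SPEED_2 via P3_LINK_SPEED_VAL(5, reg) =
+ * (reg >> 8) & 0xff.
+ */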
+
+#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
+#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
+#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
+#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
+#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158))
+#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100))
+#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120))
+#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700))
+#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
+#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44)
+#define NETXEN_MSI_MODE 0x1
+#define NETXEN_INTX_MODE 0x2
+
+#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18))
+#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c))
+#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20))
+#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24))
+#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28))
+
+#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c))
+#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40))
+
+#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50))
+#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c))
+
+#define CRB_XG_STATE (NETXEN_NIC_REG(0x94))
+#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec))
+
+#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4))
+#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc))
+#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4))
+
+#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08))
+#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c))
+#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac))
+#define CRB_CMD_CONSUMER_OFFSET_1 (NETXEN_NIC_REG(0x1b0))
+#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8))
+#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc))
+#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0))
+#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4))
+#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4))
+
+#define CRB_V2P_0 (NETXEN_NIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2 (NETXEN_CAM_RAM(0x12c))
+#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0))
+
+/*
+ * Capabilities register; can be used to selectively enable/disable
+ * features for backward compatibility.
+ */
+#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
+#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo passes the address of the HostPhantomIndex, which
+ * the Phantom host reads to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, the following
+ * registers hold the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define nx_get_temp_val(x) ((x) >> 16)
+#define nx_get_temp_state(x) ((x) & 0xffff)
+#define nx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ NX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ NX_TEMP_WARN, /* Sound alert, temperature getting high */
+ NX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
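+
+/*
+ * For example, nx_encode_temp(55, NX_TEMP_NORMAL) packs up to
+ * 0x370001, from which nx_get_temp_val() recovers 55 (the reading)
+ * and nx_get_temp_state() recovers NX_TEMP_NORMAL.
+ */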
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_CRB_WINDOW (0x10210)
+#define PCIX_CRB_WINDOW_F0 (0x10210)
+#define PCIX_CRB_WINDOW_F1 (0x10230)
+#define PCIX_CRB_WINDOW_F2 (0x10250)
+#define PCIX_CRB_WINDOW_F3 (0x10270)
+#define PCIX_CRB_WINDOW_F4 (0x102ac)
+#define PCIX_CRB_WINDOW_F5 (0x102bc)
+#define PCIX_CRB_WINDOW_F6 (0x102cc)
+#define PCIX_CRB_WINDOW_F7 (0x102dc)
+#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
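+
+/*
+ * The two strides encode the register layout: functions 0-3 sit 0x20
+ * apart and functions 4-7 sit 0x10 apart, so e.g.
+ * PCIE_CRB_WINDOW_REG(5) = 0x102ac + 0x10 = 0x102bc = PCIX_CRB_WINDOW_F5.
+ */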
+
+#define PCIX_MN_WINDOW (0x10200)
+#define PCIX_MN_WINDOW_F0 (0x10200)
+#define PCIX_MN_WINDOW_F1 (0x10220)
+#define PCIX_MN_WINDOW_F2 (0x10240)
+#define PCIX_MN_WINDOW_F3 (0x10260)
+#define PCIX_MN_WINDOW_F4 (0x102a0)
+#define PCIX_MN_WINDOW_F5 (0x102b0)
+#define PCIX_MN_WINDOW_F6 (0x102c0)
+#define PCIX_MN_WINDOW_F7 (0x102d0)
+#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_SN_WINDOW (0x10208)
+#define PCIX_SN_WINDOW_F0 (0x10208)
+#define PCIX_SN_WINDOW_F1 (0x10228)
+#define PCIX_SN_WINDOW_F2 (0x10248)
+#define PCIX_SN_WINDOW_F3 (0x10268)
+#define PCIX_SN_WINDOW_F4 (0x102a8)
+#define PCIX_SN_WINDOW_F5 (0x102b8)
+#define PCIX_SN_WINDOW_F6 (0x102c8)
+#define PCIX_SN_WINDOW_F7 (0x102d8)
+#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define PCIX_PS_MEM_SPACE (0x90000)
+
+#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg))
+#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg))
+
+#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE (0x1404c)
+
+#define PCIE_DCR 0x00d8
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM1_LOCK (0x1c008)
+#define PCIE_SEM1_UNLOCK (0x1c00c)
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */
+#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */
+#define PCIE_SEM4_LOCK (0x1c020)
+#define PCIE_SEM4_UNLOCK (0x1c024)
+#define PCIE_SEM5_LOCK (0x1c028) /* API lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */
+#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */
+#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK	(0x1c03c)	/* crb win unlock */
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
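+
+/*
+ * For example, PCIE_SEM_LOCK(2) = 0x1c000 + 8 * 2 = 0x1c010, the
+ * flash lock register (PCIE_SEM2_LOCK) above.
+ */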
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define NETXEN_PORT_MODE_NONE 0
+#define NETXEN_PORT_MODE_XG 1
+#define NETXEN_PORT_MODE_GB 2
+#define NETXEN_PORT_MODE_802_3_AP 3
+#define NETXEN_PORT_MODE_AUTO_NEG 4
+#define NETXEN_PORT_MODE_AUTO_NEG_1G 5
+#define NETXEN_PORT_MODE_AUTO_NEG_XG 6
+#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24))
+#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198))
+
+#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184))
+#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188))
+
+#define NX_PEG_TUNE_MN_PRESENT 0x1
+#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
+
+#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14))
+#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
+#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8))
+#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
+#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
+#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
+
+/* MiniDIMM related macros */
+#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
+#define NETXEN_DIMM_PRESENT 0x1
+#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM 0x2
+#define NETXEN_DIMM_SIZE 0x4
+#define NETXEN_DIMM_MEMTYPE(VAL) (((VAL) >> 3) & 0xf)
+#define NETXEN_DIMM_NUMROWS(VAL) (((VAL) >> 7) & 0xf)
+#define NETXEN_DIMM_NUMCOLS(VAL) (((VAL) >> 11) & 0xf)
+#define NETXEN_DIMM_NUMRANKS(VAL) (((VAL) >> 15) & 0x3)
+#define NETXEN_DIMM_DATAWIDTH(VAL) (((VAL) >> 18) & 0x3)
+#define NETXEN_DIMM_NUMBANKS(VAL) (((VAL) >> 21) & 0xf)
+#define NETXEN_DIMM_TYPE(VAL) (((VAL) >> 25) & 0x3f)
+#define NETXEN_DIMM_VALID_FLAG 0x80000000
+
+#define NETXEN_DIMM_MEM_DDR2_SDRAM 0x8
+
+#define NETXEN_DIMM_STD_MEM_SIZE 512
+
+#define NETXEN_DIMM_TYPE_RDIMM 0x1
+#define NETXEN_DIMM_TYPE_UDIMM 0x2
+#define NETXEN_DIMM_TYPE_SO_DIMM 0x4
+#define NETXEN_DIMM_TYPE_Micro_DIMM 0x8
+#define NETXEN_DIMM_TYPE_Mini_RDIMM 0x10
+#define NETXEN_DIMM_TYPE_Mini_UDIMM 0x20
+
+/* Device State */
+#define NX_DEV_COLD 1
+#define NX_DEV_INITALIZING 2
+#define NX_DEV_READY 3
+#define NX_DEV_NEED_RESET 4
+#define NX_DEV_NEED_QUISCENT 5
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
+
+#define NX_RCODE_DRIVER_INFO 0x20000000
+#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define NX_RCODE_FATAL_ERROR 0x80000000
+#define NX_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define NX_FWERROR_CODE(code) (((code) >> 8) & 0xfffff)
+#define NX_FWERROR_PEGSTAT1(code) (((code) >> 8) & 0x1fffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct netxen_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define NX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
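+
+/*
+ * Consumers typically instantiate this table once per driver and index
+ * it by PCI function (a sketch of the usual pattern):
+ *
+ *	static struct netxen_legacy_intr_set legacy_intr[] =
+ *		NX_LEGACY_INTR_CONFIG;
+ *
+ * giving the status/mask/trigger registers for that function's
+ * legacy interrupt.
+ */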
+
+#endif /* __NETXEN_NIC_HDR_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
new file mode 100644
index 000000000..db80eb1c6
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -0,0 +1,2593 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) ((((addr) & 0x1fc0000) >> 1) | (((addr) >> 25) & 0x3ff))
+#define OCM_WIN(addr) ((((addr) & 0x1ff0000) >> 1) | (((addr) >> 25) & 0x3ff))
+#define MS_WIN(addr) ((addr) & 0x0ffc0000)
+
+#define GET_MEM_OFFS_2M(addr) ((addr) & MASK(18))
+
+#define CRB_BLK(off)	(((off) >> 20) & 0x3f)
+#define CRB_SUBBLK(off)	(((off) >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
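+
+/*
+ * MASK(18) evaluates to 0x3ffff, so GET_MEM_OFFS_2M() keeps the low
+ * 18 address bits.  CRB_HI() swaps the PCI window number (CRB_BLK)
+ * for the hub/agent id looked up in crb_hub_agt[] below, placed in
+ * bits [31:20], while preserving the 64K sub-block select in
+ * bits [19:16].
+ */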
+
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data);
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+ void __iomem *addr);
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
+ return PCI_OFFSET_SECOND_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
+ return PCI_OFFSET_THIRD_RANGE(adapter, off);
+
+ return NULL;
+}
+
+static crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned crb_hub_agt[64] =
+{
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SRE,
+ NETXEN_HW_CRB_HUB_AGT_ADR_NIU,
+ NETXEN_HW_CRB_HUB_AGT_ADR_QMN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGND,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGSI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9,
+ NETXEN_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SMB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define NETXEN_WINDOW_ONE	0x2000000	/* CRB Window: bit 25 of CRB address */
+
+#define NETXEN_PCIE_SEM_TIMEOUT 10000
+
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
+
+int
+netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ NXWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
+{
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
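+
+/*
+ * Callers pair these around exclusive hardware sections; e.g. flash
+ * access takes semaphore 2 (a sketch of the usual pattern):
+ *
+ *	if (netxen_pcie_sem_lock(adapter, 2, NETXEN_ROM_LOCK_ID))
+ *		return -EIO;
+ *	... access flash ...
+ *	netxen_pcie_sem_unlock(adapter, 2);
+ */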
+
+static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
+{
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
+ }
+
+ return 0;
+}
+
+/* Disable an XG interface */
+static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
+{
+ __u32 mac_cfg;
+ u32 port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_cfg = 0;
+ if (NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg))
+ return -EIO;
+ return 0;
+}
+
+#define NETXEN_UNICAST_ADDR(port, index) \
+ (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8))
+#define NETXEN_MCAST_ADDR(port, index) \
+ (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8))
+#define MAC_HI(addr) \
+ ((addr[2] << 16) | (addr[1] << 8) | (addr[0]))
+#define MAC_LO(addr) \
+ ((addr[5] << 16) | (addr[4] << 8) | (addr[3]))
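+
+/*
+ * For the address 00:11:22:33:44:55 this gives MAC_HI = 0x221100 and
+ * MAC_LO = 0x554433, i.e. each half holds three octets in
+ * little-endian order.
+ */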
+
+static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ u32 mac_cfg;
+ u32 cnt = 0;
+ __u32 reg = 0x0200;
+ u32 port = adapter->physical_port;
+ u16 board_type = adapter->ahw.board_type;
+
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
+ mac_cfg &= ~0x4;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+ if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
+ (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
+ reg = (0x20 << port);
+
+ NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
+
+ mdelay(10);
+
+ while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
+ mdelay(10);
+
+ if (cnt < 20) {
+
+ reg = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
+
+ if (mode == NETXEN_NIU_PROMISC_MODE)
+ reg = (reg | 0x2000UL);
+ else
+ reg = (reg & ~0x2000UL);
+
+ if (mode == NETXEN_NIU_ALLMULTI_MODE)
+ reg = (reg | 0x1000UL);
+ else
+ reg = (reg & ~0x1000UL);
+
+ NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+ }
+
+ mac_cfg |= 0x4;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+ return 0;
+}
+
+static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ u32 mac_hi, mac_lo;
+ u32 reg_hi, reg_lo;
+
+ u8 phy = adapter->physical_port;
+
+ if (phy >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
+ mac_hi = addr[2] | ((u32)addr[3] << 8) |
+ ((u32)addr[4] << 16) | ((u32)addr[5] << 24);
+
+ reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
+ reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);
+
+ /* write twice to flush */
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->mac_addr;
+
+ if (adapter->mc_enabled)
+ return 0;
+
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+ val |= (1UL << (28+port));
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ /* add broadcast addr to filter */
+ val = 0xffffff;
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ /* add station addr to filter */
+ val = MAC_HI(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
+ val = MAC_LO(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);
+
+ adapter->mc_enabled = 1;
+ return 0;
+}
+
+static int
+netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->mac_addr;
+
+ if (!adapter->mc_enabled)
+ return 0;
+
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+ val &= ~(1UL << (28+port));
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ val = MAC_HI(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ val = MAC_LO(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
+
+ adapter->mc_enabled = 0;
+ return 0;
+}
+
+static int
+netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
+ int index, u8 *addr)
+{
+ u32 hi = 0, lo = 0;
+ u16 port = adapter->physical_port;
+
+ lo = MAC_LO(addr);
+ hi = MAC_HI(addr);
+
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);
+
+ return 0;
+}
+
+static void netxen_p2_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ u8 null_addr[ETH_ALEN];
+ int i;
+
+ eth_zero_addr(null_addr);
+
+ if (netdev->flags & IFF_PROMISC) {
+
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_PROMISC_MODE);
+
+ /* Full promiscuous mode */
+ netxen_nic_disable_mcast_filter(adapter);
+
+ return;
+ }
+
+ if (netdev_mc_empty(netdev)) {
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_NON_PROMISC_MODE);
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ netxen_nic_enable_mcast_filter(adapter);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
+
+ /* Clear out remaining addresses */
+ while (i < adapter->max_mc_count)
+ netxen_nic_set_mcast_addr(adapter, i++, null_addr);
+}
+
+static int
+netxen_send_cmd_descs(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct netxen_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct nx_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= netxen_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ smp_mb();
+ if (netxen_tx_avail(tx_ring) > nr_desc) {
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+static int
+nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
+{
+ nx_nic_req_t req;
+ nx_mac_req_t *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
+
+ word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (nx_mac_req_t *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
+
+ return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+ const u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ /* look up if already exists */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, nx_mac_list_t, list);
+
+ if (ether_addr_equal(addr, cur->mac_addr)) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
+ }
+
+ cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
+ if (cur == NULL)
+ return -ENOMEM;
+
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_ADD);
+}
+
+static void netxen_p3_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ list_splice_tail_init(&adapter->mac_list, &del_list);
+
+ nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+ nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
+ }
+
+send_fw_cmd:
+ adapter->set_promisc(adapter, mode);
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ nx_nic_req_t req;
+ u64 word;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return netxen_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
+{
+ nx_mac_list_t *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ /* assuming caller has already copied new addr to netdev */
+ netxen_p3_nic_set_multi(adapter->netdev);
+ return 0;
+}
+
+#define NETXEN_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
+{
+ nx_nic_req_t req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "interrupt coalescing parameters\n");
+ }
+
+ return rv;
+}
+
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv = 0;
+
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "configure hw lro request\n");
+ }
+
+ return rv;
+}
+
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv = 0;
+
+ if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable)
+ return rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "configure bridge mode request\n");
+ }
+
+ adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int netxen_config_rss(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
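+	/*
+	 * Worked example: for enable = 1 the assignment below evaluates to
+	 * word = 0x30 | 0xc0 | 0x100 | (0x7ULL << 48) = 0x00070000000001f0
+	 * (hash_method is left at 0).
+	 */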
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < ARRAY_SIZE(key); i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure RSS\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ memcpy(&req.words[1], &ip, sizeof(u32));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+		printk(KERN_ERR "%s: could not send %s request for IP 0x%x\n",
+		       adapter->netdev->name,
+		       (cmd == NX_IP_UP) ? "Add" : "Remove", ip);
+ }
+ return rv;
+}
+
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure link notification\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+		((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not cleanup lro flows\n",
+ adapter->netdev->name);
+ }
+ return rv;
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transfer Unit
+ * @returns 0 on success, negative on failure
+ */
+
+#define MTU_FUDGE_FACTOR 100
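+/*
+ * Worked example: an MTU of 1500 programs a max frame size of 1600 via
+ * netxen_nic_set_mtu_xgb() below, presumably leaving headroom for
+ * link-level headers and CRC.
+ */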
+
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int max_mtu;
+ int rc = 0;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ max_mtu = P3_MAX_MTU;
+ else
+ max_mtu = P2_MAX_MTU;
+
+ if (mtu > max_mtu) {
+ printk(KERN_ERR "%s: mtu > %d bytes unsupported\n",
+ netdev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ if (adapter->set_mtu)
+ rc = adapter->set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
+				  int size, __le32 *buf)
+{
+ int i, v, addr;
+ __le32 *ptr32;
+
+ addr = base;
+ ptr32 = buf;
+ for (i = 0; i < size / sizeof(u32); i++) {
+ if (netxen_rom_fast_read(adapter, addr, &v) == -1)
+ return -1;
+ *ptr32 = cpu_to_le32(v);
+ ptr32++;
+ addr += sizeof(u32);
+ }
+ if ((char *)buf + size > (char *)ptr32) {
+ __le32 local;
+ if (netxen_rom_fast_read(adapter, addr, &v) == -1)
+ return -1;
+ local = cpu_to_le32(v);
+ memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+ }
+
+ return 0;
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+ __le32 *pmac = (__le32 *) mac;
+ u32 offset;
+
+ offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64));
+
+ if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
+ return -1;
+
+ if (*mac == ~0ULL) {
+
+ offset = NX_OLD_MAC_ADDR_OFFSET +
+ (adapter->portnum * sizeof(u64));
+
+ if (netxen_get_flash_block(adapter,
+ offset, sizeof(u64), pmac) == -1)
+ return -1;
+
+ if (*mac == ~0ULL)
+ return -1;
+ }
+ return 0;
+}
+
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+ uint32_t crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ mac_lo = NXRD32(adapter, crbaddr);
+ mac_hi = NXRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
+
+/*
+ * Changes the CRB window to the specified window.
+ */
+static void
+netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
+ u32 window)
+{
+ void __iomem *offset;
+ int count = 10;
+ u8 func = adapter->ahw.pci_func;
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ offset = PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
+
+ writel(window, offset);
+ do {
+ if (window == readl(offset))
+ break;
+
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d\n",
+ (window == NETXEN_WINDOW_ONE));
+ udelay(1);
+
+ } while (--count > 0);
+
+ if (count > 0)
+ adapter->ahw.crb_win = window;
+}
+
+/*
+ * Translates a CRB offset (in the 128M-map layout) into an ioremapped
+ * address within the 2M BAR.  Returns:
+ *	< 0 if 'off' is not a valid CRB offset,
+ *	1 if indirect (windowed) access is needed; '*addr' then points
+ *	  into the CRB_INDIRECT_2M region,
+ *	0 if direct access is possible; '*addr' is the final 2M-map address.
+ */
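+/*
+ * Typical call pattern (see netxen_nic_hw_write_wx_2M() below): on a
+ * return of 1 the caller takes crb_lock and the CRB window semaphore,
+ * programs the window with netxen_nic_pci_set_crbwindow_2M(), and only
+ * then accesses *addr.
+ */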
+static int
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ crb_128M_2M_sub_block_map_t *m;
+
+
+ if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= NETXEN_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+ (off & MASK(16));
+ return 1;
+}
+
+/*
+ * Programs the 2M CRB window register for the given CRB offset ('off' is
+ * an offset in the 128M-map CRB layout).  Callers must hold crb_lock and
+ * the CRB window semaphore (crb_win_lock()).
+ */
+static void
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= NETXEN_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+}
+
+static void __iomem *
+netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter,
+ ulong win_off, void __iomem **mem_ptr)
+{
+ ulong off = win_off;
+ void __iomem *addr;
+ resource_size_t mem_base;
+
+ if (ADDR_IN_WINDOW1(win_off))
+ off = NETXEN_CRB_NORMAL(win_off);
+
+ addr = pci_base_offset(adapter, off);
+ if (addr)
+ return addr;
+
+ if (adapter->ahw.pci_len0 == 0)
+ off -= NETXEN_PCI_CRBSPACE;
+
+ mem_base = pci_resource_start(adapter->pdev, 0);
+ *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
+ if (*mem_ptr)
+ addr = *mem_ptr + (off & (PAGE_SIZE - 1));
+
+ return addr;
+}
+
+static int
+netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ void __iomem *addr, *mem_ptr = NULL;
+
+ addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+ if (!addr)
+ return -EIO;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ netxen_nic_io_write_128M(adapter, addr, data);
+ } else { /* Window 0 */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+ writel(data, addr);
+ netxen_nic_pci_set_crbwindow_128M(adapter,
+ NETXEN_WINDOW_ONE);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ }
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+
+ return 0;
+}
+
+static u32
+netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ void __iomem *addr, *mem_ptr = NULL;
+ u32 data;
+
+ addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+ if (!addr)
+ return -EIO;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ data = netxen_nic_io_read_128M(adapter, addr);
+ } else { /* Window 0 */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+ data = readl(addr);
+ netxen_nic_pci_set_crbwindow_128M(adapter,
+ NETXEN_WINDOW_ONE);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ }
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+
+ return data;
+}
+
+static int
+netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+static u32
+netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+/* window 1 registers only */
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data)
+{
+ read_lock(&adapter->ahw.crb_lock);
+ writel(data, addr);
+ read_unlock(&adapter->ahw.crb_lock);
+}
+
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+ void __iomem *addr)
+{
+ u32 val;
+
+ read_lock(&adapter->ahw.crb_lock);
+ val = readl(addr);
+ read_unlock(&adapter->ahw.crb_lock);
+
+ return val;
+}
+
+static void netxen_nic_io_write_2M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data)
+{
+ writel(data, addr);
+}
+
+static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
+ void __iomem *addr)
+{
+ return readl(addr);
+}
+
+void __iomem *
+netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if ((offset < NETXEN_CRB_PCIX_HOST2) &&
+ (offset > NETXEN_CRB_PCIX_HOST))
+ addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
+ else
+ addr = NETXEN_CRB_NORMALIZE(adapter, offset);
+ } else {
+ WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
+ offset, &addr));
+ }
+
+ return addr;
+}
+
+static int
+netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0);
+ return 0;
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static int
+netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+
+ window = OCM_WIN(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
+static int
+netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ ret = adapter->pci_set_window(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ addr = adapter->ahw.pci_base0 + start;
+ } else {
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) +
+ (start & PAGE_MASK);
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE-1));
+ }
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+#define MAX_CTL_CHECK 1000
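+/*
+ * The mem read/write helpers below share one test-agent handshake: program
+ * the 64-bit-aligned address (plus the data words for writes), pulse
+ * TA_CTL_ENABLE and then TA_CTL_START | TA_CTL_ENABLE (with TA_CTL_WRITE
+ * for writes), and poll TEST_AGT_CTRL up to MAX_CTL_CHECK times for
+ * TA_CTL_BUSY to clear.
+ */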
+
+static int
+netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
+ u64 off, u64 data)
+{
+ int j, ret;
+ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P2 has different SIU and MIU test agent base addr */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P2)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+ addr_hi = SIU_TEST_AGT_ADDR_HI;
+ data_lo = SIU_TEST_AGT_WRDATA_LO;
+ data_hi = SIU_TEST_AGT_WRDATA_HI;
+ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ addr_hi = MIU_TEST_AGT_ADDR_HI;
+ data_lo = MIU_TEST_AGT_WRDATA_LO;
+ data_hi = MIU_TEST_AGT_WRDATA_HI;
+ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+ off_hi = 0;
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ if (adapter->ahw.pci_len0 != 0) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, &data, 1);
+ }
+ }
+
+ return -EIO;
+
+correct:
+ spin_lock(&adapter->ahw.mem_lock);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(off_hi, (mem_crb + addr_hi));
+ writel(data & 0xffffffff, (mem_crb + data_lo));
+ writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl((mem_crb + TEST_AGT_CTRL));
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+ spin_unlock(&adapter->ahw.mem_lock);
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P2 has different SIU and MIU test agent base addr */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P2)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+ addr_hi = SIU_TEST_AGT_ADDR_HI;
+ data_lo = SIU_TEST_AGT_RDDATA_LO;
+ data_hi = SIU_TEST_AGT_RDDATA_HI;
+ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ addr_hi = MIU_TEST_AGT_ADDR_HI;
+ data_lo = MIU_TEST_AGT_RDDATA_LO;
+ data_hi = MIU_TEST_AGT_RDDATA_HI;
+ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+ off_hi = 0;
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ if (adapter->ahw.pci_len0 != 0) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+ }
+
+ return -EIO;
+
+correct:
+ spin_lock(&adapter->ahw.mem_lock);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(off_hi, (mem_crb + addr_hi));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+
+ temp = readl(mem_crb + data_hi);
+ val = ((u64)temp << 32);
+ val |= readl(mem_crb + data_lo);
+ *data = val;
+ ret = 0;
+ }
+
+ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
+ u64 off, u64 data)
+{
+ int j, ret;
+ u32 temp, off8;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
+ return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ off8 = off & 0xfffffff8;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA_LO);
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA_HI);
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ off8 = off & 0xfffffff8;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+ val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
+ *data = val;
+ ret = 0;
+ }
+
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+void
+netxen_setup_hwops(struct netxen_adapter *adapter)
+{
+ adapter->init_port = netxen_niu_xg_init_port;
+ adapter->stop_port = netxen_niu_disable_xg_port;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+		adapter->crb_read = netxen_nic_hw_read_wx_128M;
+		adapter->crb_write = netxen_nic_hw_write_wx_128M;
+		adapter->pci_set_window = netxen_nic_pci_set_window_128M;
+		adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
+		adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
+		adapter->io_read = netxen_nic_io_read_128M;
+		adapter->io_write = netxen_nic_io_write_128M;
+
+ adapter->macaddr_set = netxen_p2_nic_set_mac_addr;
+ adapter->set_multi = netxen_p2_nic_set_multi;
+ adapter->set_mtu = netxen_nic_set_mtu_xgb;
+ adapter->set_promisc = netxen_p2_nic_set_promisc;
+
+ } else {
+		adapter->crb_read = netxen_nic_hw_read_wx_2M;
+		adapter->crb_write = netxen_nic_hw_write_wx_2M;
+		adapter->pci_set_window = netxen_nic_pci_set_window_2M;
+		adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
+		adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
+		adapter->io_read = netxen_nic_io_read_2M;
+		adapter->io_write = netxen_nic_io_write_2M;
+
+ adapter->set_mtu = nx_fw_cmd_set_mtu;
+ adapter->set_promisc = netxen_p3_nic_set_promisc;
+ adapter->macaddr_set = netxen_p3_nic_set_mac_addr;
+ adapter->set_multi = netxen_p3_nic_set_multi;
+
+ adapter->phy_read = nx_fw_cmd_query_phy;
+ adapter->phy_write = nx_fw_cmd_set_phy;
+ }
+}
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = NX_FW_MAGIC_OFFSET;
+ if (netxen_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != NETXEN_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = NX_BRDTYPE_OFFSET;
+ if (netxen_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = NETXEN_BRDTYPE_P3_10G_TP;
+ }
+
+ adapter->ahw.board_type = board_type;
+
+ switch (board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ adapter->ahw.port_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = NETXEN_NIC_XGBE;
+ break;
+ case NETXEN_BRDTYPE_P1_BD:
+ case NETXEN_BRDTYPE_P1_SB:
+ case NETXEN_BRDTYPE_P1_SMAX:
+ case NETXEN_BRDTYPE_P1_SOCK:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = NETXEN_NIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+/* NIU access sections */
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
+{
+ new_mtu += MTU_FUDGE_FACTOR;
+ if (adapter->physical_port == 0)
+ NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
+ else
+ NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu);
+ return 0;
+}
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
+{
+ __u32 status;
+ __u32 autoneg;
+ __u32 port_mode;
+
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = -1;
+ adapter->link_autoneg = AUTONEG_ENABLE;
+ return;
+ }
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ adapter->link_speed = SPEED_1000;
+ adapter->link_duplex = DUPLEX_FULL;
+ adapter->link_autoneg = AUTONEG_DISABLE;
+ return;
+ }
+
+ if (adapter->phy_read &&
+ adapter->phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) == 0) {
+ if (netxen_get_phy_link(status)) {
+ switch (netxen_get_phy_speed(status)) {
+ case 0:
+ adapter->link_speed = SPEED_10;
+ break;
+ case 1:
+ adapter->link_speed = SPEED_100;
+ break;
+ case 2:
+ adapter->link_speed = SPEED_1000;
+ break;
+ default:
+ adapter->link_speed = 0;
+ break;
+ }
+ switch (netxen_get_phy_duplex(status)) {
+ case 0:
+ adapter->link_duplex = DUPLEX_HALF;
+ break;
+ case 1:
+ adapter->link_duplex = DUPLEX_FULL;
+ break;
+ default:
+ adapter->link_duplex = -1;
+ break;
+ }
+ if (adapter->phy_read &&
+ adapter->phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+					    &autoneg) == 0)
+ adapter->link_autoneg = autoneg;
+ } else
+ goto link_down;
+ } else {
+ link_down:
+ adapter->link_speed = 0;
+ adapter->link_duplex = -1;
+ }
+ }
+}
+
+int
+netxen_nic_wol_supported(struct netxen_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+static u32 netxen_md_cntrl(struct netxen_adapter *adapter,
+ struct netxen_minidump_template_hdr *template_hdr,
+ struct netxen_minidump_entry_crb *crtEntry)
+{
+ int loop_cnt, i, rv = 0, timeout_flag;
+ u32 op_count, stride;
+ u32 opcode, read_value, addr;
+ unsigned long timeout, timeout_jiffies;
+ addr = crtEntry->addr;
+ op_count = crtEntry->op_count;
+ stride = crtEntry->addr_stride;
+
+ for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+ for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) {
+ opcode = (crtEntry->opcode & (0x1 << i));
+ if (opcode) {
+ switch (opcode) {
+ case NX_DUMP_WCRB:
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ crtEntry->value_1);
+ break;
+ case NX_DUMP_RWCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_ANDCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ read_value &= crtEntry->value_2;
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_ORCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ read_value |= crtEntry->value_3;
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_POLLCRB:
+ timeout = crtEntry->poll_timeout;
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ timeout_jiffies =
+ msecs_to_jiffies(timeout) + jiffies;
+ for (timeout_flag = 0;
+ !timeout_flag
+ && ((read_value & crtEntry->value_2)
+ != crtEntry->value_1);) {
+ if (time_after(jiffies,
+ timeout_jiffies))
+ timeout_flag = 1;
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ }
+
+ if (timeout_flag) {
+					dev_err(&adapter->pdev->dev,
+						"%s: timeout in poll_crb control operation\n",
+						__func__);
+ return -1;
+ }
+ break;
+ case NX_DUMP_RD_SAVE:
+ /* Decide which address to use */
+ if (crtEntry->state_index_a)
+ addr =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_a];
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v]
+ = read_value;
+ break;
+ case NX_DUMP_WRT_SAVED:
+ /* Decide which value to use */
+ if (crtEntry->state_index_v)
+ read_value =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v];
+ else
+ read_value = crtEntry->value_1;
+
+ /* Decide which address to use */
+ if (crtEntry->state_index_a)
+ addr =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_a];
+
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_MOD_SAVE_ST:
+ read_value =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v];
+ read_value <<= crtEntry->shl;
+ read_value >>= crtEntry->shr;
+ if (crtEntry->value_2)
+ read_value &=
+ crtEntry->value_2;
+ read_value |= crtEntry->value_3;
+ read_value += crtEntry->value_1;
+ /* Write value back to state area.*/
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v]
+ = read_value;
+ break;
+ default:
+ rv = 1;
+ break;
+ }
+ }
+ }
+ addr = addr + stride;
+ }
+ return rv;
+}
+
+/* Read memory or MN */
+static u32
+netxen_md_rdmem(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdmem
+ *memEntry, u64 *data_buff)
+{
+ u64 addr, value = 0;
+ int i = 0, loop_cnt;
+
+ addr = (u64)memEntry->read_addr;
+ loop_cnt = memEntry->read_data_size; /* This is size in bytes */
+ loop_cnt /= sizeof(value);
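+	/* e.g. read_data_size = 32 bytes -> loop_cnt = 4 64-bit reads */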
+
+ for (i = 0; i < loop_cnt; i++) {
+ if (netxen_nic_pci_mem_read_2M(adapter, addr, &value))
+ goto out;
+ *data_buff++ = value;
+ addr += sizeof(value);
+ }
+out:
+ return i * sizeof(value);
+}
+
+/* Read CRB operation */
+static u32 netxen_md_rd_crb(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_crb
+ *crbEntry, u32 *data_buff)
+{
+ int loop_cnt;
+ u32 op_count, addr, stride, value;
+
+ addr = crbEntry->addr;
+ op_count = crbEntry->op_count;
+ stride = crbEntry->addr_stride;
+
+ for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+ NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value);
+ *data_buff++ = addr;
+ *data_buff++ = value;
+ addr = addr + stride;
+ }
+ return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Read ROM */
+static u32
+netxen_md_rdrom(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdrom
+ *romEntry, __le32 *data_buff)
+{
+ int i, count = 0;
+ u32 size, lck_val;
+ u32 val;
+ u32 fl_addr, waddr, raddr;
+ fl_addr = romEntry->read_addr;
+ size = romEntry->read_data_size/4;
+lock_try:
+ lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 +
+ NX_FLASH_SEM2_LK));
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ msleep(20);
+ count++;
+ goto lock_try;
+ }
+ writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 +
+ NX_FLASH_LOCK_ID));
+ for (i = 0; i < size; i++) {
+ waddr = fl_addr & 0xFFFF0000;
+ NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr);
+ raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF);
+ NX_RD_DUMP_REG(raddr, adapter->ahw.pci_base0, &val);
+ *data_buff++ = cpu_to_le32(val);
+ fl_addr += sizeof(val);
+ }
+ readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK));
+ return romEntry->read_data_size;
+}
+
+/* Handle L2 Cache */
+static u32
+netxen_md_L2Cache(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_cache
+ *cacheEntry, u32 *data_buff)
+{
+ int loop_cnt, i, k, timeout_flag = 0;
+ u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+ u32 tag_value, read_cnt;
+ u8 cntl_value_w, cntl_value_r;
+ unsigned long timeout, timeout_jiffies;
+
+ loop_cnt = cacheEntry->op_count;
+ read_addr = cacheEntry->read_addr;
+ cntrl_addr = cacheEntry->control_addr;
+ cntl_value_w = (u32) cacheEntry->write_value;
+ tag_reg_addr = cacheEntry->tag_reg_addr;
+ tag_value = cacheEntry->init_tag_value;
+ read_cnt = cacheEntry->read_addr_cnt;
+
+ for (i = 0; i < loop_cnt; i++) {
+ NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+ if (cntl_value_w)
+ NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ (u32)cntl_value_w);
+ if (cacheEntry->poll_mask) {
+ timeout = cacheEntry->poll_wait;
+ NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ &cntl_value_r);
+ timeout_jiffies = msecs_to_jiffies(timeout) + jiffies;
+ for (timeout_flag = 0; !timeout_flag &&
+ ((cntl_value_r & cacheEntry->poll_mask) != 0);) {
+ if (time_after(jiffies, timeout_jiffies))
+ timeout_flag = 1;
+ NX_RD_DUMP_REG(cntrl_addr,
+ adapter->ahw.pci_base0,
+ &cntl_value_r);
+ }
+ if (timeout_flag) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout in processing L2 Tag poll.\n");
+ return -1;
+ }
+ }
+ addr = read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ addr += cacheEntry->read_addr_stride;
+ }
+ tag_value += cacheEntry->tag_value_stride;
+ }
+ return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+
+/* Handle L1 Cache */
+static u32 netxen_md_L1Cache(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_cache
+ *cacheEntry, u32 *data_buff)
+{
+ int i, k, loop_cnt;
+ u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+ u32 tag_value, read_cnt;
+ u8 cntl_value_w;
+
+ loop_cnt = cacheEntry->op_count;
+ read_addr = cacheEntry->read_addr;
+ cntrl_addr = cacheEntry->control_addr;
+ cntl_value_w = (u32) cacheEntry->write_value;
+ tag_reg_addr = cacheEntry->tag_reg_addr;
+ tag_value = cacheEntry->init_tag_value;
+ read_cnt = cacheEntry->read_addr_cnt;
+
+ for (i = 0; i < loop_cnt; i++) {
+ NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+ NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ (u32) cntl_value_w);
+ addr = read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ addr += cacheEntry->read_addr_stride;
+ }
+ tag_value += cacheEntry->tag_value_stride;
+ }
+ return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+/* Reading OCM memory */
+static u32
+netxen_md_rdocm(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdocm
+ *ocmEntry, u32 *data_buff)
+{
+ int i, loop_cnt;
+ u32 value;
+ void __iomem *addr;
+ addr = (ocmEntry->read_addr + adapter->ahw.pci_base0);
+ loop_cnt = ocmEntry->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ value = readl(addr);
+ *data_buff++ = value;
+ addr += ocmEntry->read_addr_stride;
+ }
+ return i * sizeof(u32);
+}
+
+/* Read MUX data */
+static u32
+netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux
+ *muxEntry, u32 *data_buff)
+{
+ int loop_cnt = 0;
+ u32 read_addr, read_value, select_addr, sel_value;
+
+ read_addr = muxEntry->read_addr;
+ sel_value = muxEntry->select_value;
+ select_addr = muxEntry->select_addr;
+
+ for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
+ NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value);
+ NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value);
+ *data_buff++ = sel_value;
+ *data_buff++ = read_value;
+ sel_value += muxEntry->select_value_stride;
+ }
+ return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Handling Queue State Reads */
+static u32
+netxen_md_rdqueue(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_queue
+ *queueEntry, u32 *data_buff)
+{
+ int loop_cnt, k;
+ u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt;
+
+ read_cnt = queueEntry->read_addr_cnt;
+ read_stride = queueEntry->read_addr_stride;
+ select_addr = queueEntry->select_addr;
+
+ for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
+ loop_cnt++) {
+ NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
+ read_addr = queueEntry->read_addr;
+		for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ read_addr += read_stride;
+ }
+ queue_id += queueEntry->queue_id_stride;
+ }
+ return loop_cnt * (read_cnt * sizeof(read_value));
+}
+
+
+/*
+ * Catch the case where the driver does not read as much data
+ * from the entry as the template says to expect.
+ */
+
+static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry *entry, int esize)
+{
+ if (esize < 0) {
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ return esize;
+ }
+	if (esize != entry->hdr.entry_capture_size) {
+		dev_info(&adapter->pdev->dev,
+			"Invalid dump, type %d, mask %d, size %d, expected capture size %d\n",
+			entry->hdr.entry_type, entry->hdr.entry_capture_mask,
+			esize, entry->hdr.entry_capture_size);
+		entry->hdr.entry_capture_size = esize;
+		entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR;
+ dev_info(&adapter->pdev->dev, "Aborting further dump capture\n");
+ }
+ return 0;
+}
+
+static int netxen_parse_md_template(struct netxen_adapter *adapter)
+{
+ int num_of_entries, buff_level, e_cnt, esize;
+ int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+ char *dbuff;
+ void *template_buff = adapter->mdump.md_template;
+ char *dump_buff = adapter->mdump.md_capture_buff;
+ int capture_mask = adapter->mdump.md_capture_mask;
+ struct netxen_minidump_template_hdr *template_hdr;
+ struct netxen_minidump_entry *entry;
+
+ if ((capture_mask & 0x3) != 0x3) {
+ dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed "
+ "for valid firmware dump\n", capture_mask);
+ return -EINVAL;
+ }
+ template_hdr = (struct netxen_minidump_template_hdr *) template_buff;
+ num_of_entries = template_hdr->num_of_entries;
+ entry = (struct netxen_minidump_entry *) ((char *) template_buff +
+ template_hdr->first_entry_offset);
+ memcpy(dump_buff, template_buff, adapter->mdump.md_template_size);
+ dump_buff = dump_buff + adapter->mdump.md_template_size;
+
+ if (template_hdr->entry_type == TLHDR)
+ sane_start = 1;
+
+ for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) {
+ if (!(entry->hdr.entry_capture_mask & capture_mask)) {
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ entry = (struct netxen_minidump_entry *)
+ ((char *) entry + entry->hdr.entry_size);
+ continue;
+ }
+ switch (entry->hdr.entry_type) {
+ case RDNOP:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ case RDEND:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ if (!sane_end)
+ end_cnt = e_cnt;
+ sane_end += 1;
+ break;
+ case CNTRL:
+ rv = netxen_md_cntrl(adapter,
+ template_hdr, (void *)entry);
+ if (rv)
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ case RDCRB:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rd_crb(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDMN:
+ case RDMEM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdmem(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case BOARD:
+ case RDROM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdrom(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case L2ITG:
+ case L2DTG:
+ case L2DAT:
+ case L2INS:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_L2Cache(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case L1DAT:
+ case L1INS:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_L1Cache(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDOCM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdocm(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDMUX:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdmux(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case QUEUE:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdqueue(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ default:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ }
+ /* Next entry in the template */
+ entry = (struct netxen_minidump_entry *)
+ ((char *) entry + entry->hdr.entry_size);
+ }
+ if (!sane_start || sane_end > 1) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware minidump template configuration error.\n");
+ }
+ return 0;
+}
+
+static int
+netxen_collect_minidump(struct netxen_adapter *adapter)
+{
+	struct netxen_minidump_template_hdr *hdr;
+	struct timespec val;
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
+ jiffies_to_timespec(jiffies, &val);
+ hdr->driver_timestamp = (u32) val.tv_sec;
+ hdr->driver_info_word2 = adapter->fw_version;
+ hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
+	return netxen_parse_md_template(adapter);
+}
+
+
+void
+netxen_dump_fw(struct netxen_adapter *adapter)
+{
+ struct netxen_minidump_template_hdr *hdr;
+ int i, k, data_size = 0;
+ u32 capture_mask;
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ capture_mask = adapter->mdump.md_capture_mask;
+
+ for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) {
+ if (i & capture_mask)
+ data_size += hdr->capture_size_array[k];
+ }
+ if (!data_size) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid cap sizes for capture_mask=0x%x\n",
+ adapter->mdump.md_capture_mask);
+ return;
+ }
+ adapter->mdump.md_capture_size = data_size;
+ adapter->mdump.md_dump_size = adapter->mdump.md_template_size +
+ adapter->mdump.md_capture_size;
+ if (!adapter->mdump.md_capture_buff) {
+ adapter->mdump.md_capture_buff =
+ vzalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff)
+ return;
+
+ if (netxen_collect_minidump(adapter)) {
+ adapter->mdump.has_valid_dump = 0;
+ adapter->mdump.md_dump_size = 0;
+ vfree(adapter->mdump.md_capture_buff);
+ adapter->mdump.md_capture_buff = NULL;
+ dev_err(&adapter->pdev->dev,
+ "Error in collecting firmware minidump.\n");
+ } else {
+ adapter->mdump.md_timestamp = jiffies;
+ adapter->mdump.has_valid_dump = 1;
+ adapter->fw_mdump_rdy = 1;
+ dev_info(&adapter->pdev->dev, "%s Successfully "
+ "collected fw dump.\n", adapter->netdev->name);
+ }
+
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "Cannot overwrite previously collected "
+ "firmware minidump.\n");
+ adapter->fw_mdump_rdy = 1;
+ return;
+ }
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
new file mode 100644
index 000000000..7433c4d21
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __NETXEN_NIC_HW_H_
+#define __NETXEN_NIC_HW_H_
+
+/* Hardware memory size of 128 meg */
+#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
+
+struct netxen_adapter;
+
+#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+
+#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define netxen_gb_tx_flowctl(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define netxen_gb_tx_reset_pb(config_word) \
+ ((config_word) |= 1 << 16)
+#define netxen_gb_rx_reset_pb(config_word) \
+ ((config_word) |= 1 << 17)
+#define netxen_gb_tx_reset_mac(config_word) \
+ ((config_word) |= 1 << 18)
+#define netxen_gb_rx_reset_mac(config_word) \
+ ((config_word) |= 1 << 19)
+
+#define netxen_gb_unset_tx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+#define netxen_gb_get_tx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 1)
+#define netxen_gb_get_rx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+#define netxen_gb_get_tx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_rx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 5)
+#define netxen_gb_get_soft_reset(config_word) \
+ _netxen_crb_get_bit((config_word), 31)
+
+#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
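+
+/*
+ * Example (illustrative only; NETXEN_NIU_GB_MAC_CONFIG_0() is assumed
+ * to be the per-port register offset macro from netxen_nic.h): enabling
+ * pause frame generation and reception on a GbE port is a
+ * read-modify-write of MAC config word 0:
+ *
+ *	u32 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+ *	netxen_gb_tx_flowctl(val);
+ *	netxen_gb_rx_flowctl(val);
+ *	NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), val);
+ */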
+
+#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \
+ ((config_word) |= ((val) & 0x07))
+#define netxen_gb_mii_mgmt_reset(config_word) \
+ ((config_word) |= 1 << 31)
+#define netxen_gb_mii_mgmt_unset(config_word) \
+ ((config_word) &= ~(1 << 31))
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \
+ ((config_word) |= ((val) & 0x1F))
+#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \
+ ((config_word) |= (((val) & 0x1F) << 8))
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
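+
+/*
+ * Example (illustrative only; the ADDR/COMMAND/INDICATE register offset
+ * macros are assumed to come from netxen_nic.h): a single MII mgmt read
+ * cycle programs the PHY and register address, requests one read cycle,
+ * then polls the busy indicator before fetching the result:
+ *
+ *	u32 addr = 0, cmd = 0, ind;
+ *	netxen_gb_mii_mgmt_phy_addr(addr, phy);
+ *	netxen_gb_mii_mgmt_reg_addr(addr, reg);
+ *	netxen_gb_mii_mgmt_set_read_cycle(cmd);
+ *	NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(port), addr);
+ *	NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(port), cmd);
+ *	do {
+ *		ind = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_INDICATE(port));
+ *	} while (netxen_get_gb_mii_mgmt_busy(ind));
+ */
+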
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define netxen_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define netxen_xg_get_xg0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_xg_get_xg1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+
+#define netxen_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * NIU GB Pause Ctl Register
+ *
+ * Bit 0 : gb0_mask => 1:disable tx pause frames for GB0
+ * Bit 2 : gb1_mask => 1:disable tx pause frames for GB1
+ * Bit 4 : gb2_mask => 1:disable tx pause frames for GB2
+ * Bit 6 : gb3_mask => 1:disable tx pause frames for GB3
+ */
+#define netxen_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define netxen_gb_get_gb0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_gb_get_gb1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 2)
+#define netxen_gb_get_gb2_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_gb3_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 6)
+
+#define netxen_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define netxen_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define netxen_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define netxen_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define netxen_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define netxen_get_phy_link(config_word) \
+ _netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_duplex(config_word) \
+ _netxen_crb_get_bit(config_word, 13)
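+
+/*
+ * Example (illustrative only): decoding a word read from the
+ * PHY-specific status register (reg 17):
+ *
+ *	if (netxen_get_phy_link(status)) {
+ *		speed = netxen_get_phy_speed(status);  (0:10, 1:100, 2:1000)
+ *		full  = netxen_get_phy_duplex(status); (1:full, 0:half)
+ *	}
+ */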
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define netxen_get_niu_enable_ge(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+
+#define NETXEN_NIU_NON_PROMISC_MODE 0
+#define NETXEN_NIU_PROMISC_MODE 1
+#define NETXEN_NIU_ALLMULTI_MODE 2
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define netxen_xg_soft_reset(config_word) \
+ ((config_word) |= 1 << 4)
+
+typedef struct {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+} crb_128M_2M_sub_block_map_t;
+
+typedef struct {
+ crb_128M_2M_sub_block_map_t sub_block[16];
+} crb_128M_2M_block_map_t;
+
+#endif /* __NETXEN_NIC_HW_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
new file mode 100644
index 000000000..7b43a3b4a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -0,0 +1,1933 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <net/checksum.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR (0xffffffff)
+
+#define crb_addr_transform(name) \
+ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
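+
+/*
+ * For example, crb_addr_transform(XDMA) expands to
+ *
+ *	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_XDMA] =
+ *		NETXEN_HW_CRB_HUB_AGT_ADR_XDMA << 20;
+ *
+ * i.e. each entry records a CRB block's hub agent address in the top
+ * bits of the word, indexed by the block's PX map id, which lets
+ * netxen_decode_crb_addr() below reverse the mapping.
+ */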
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == NETXEN_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_buffer *cmd_buf;
+ struct netxen_skb_frag *buffrag;
+ int i, j;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ spin_lock_bh(&adapter->tx_clean_lock);
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+ spin_unlock_bh(&adapter->tx_clean_lock);
+}
+
+void netxen_free_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
+}
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int ring, i;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
+
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL)
+ goto err_out;
+
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
+ goto err_out;
+
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_JUMBO_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & NX_CAP0_HW_LRO)
+ rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL)
+ /* free whatever was already allocated */
+ goto err_out;
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = NETXEN_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ netxen_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+static u32 netxen_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = NETXEN_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == NETXEN_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define NETXEN_MAX_ROM_WAIT_USEC 100
+
+static int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+				"Timeout reached waiting for rom done\n");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct netxen_adapter *adapter,
+ int addr, int *valp)
+{
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (netxen_wait_rom_done(adapter)) {
+		printk(KERN_ERR "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = netxen_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (netxen_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+#define NETXEN_BOARDTYPE 0x4008
+#define NETXEN_BOARDNUM 0x400c
+#define NETXEN_CHIPNUM 0x4010
+
+int netxen_pinit_from_rom(struct netxen_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay = 0;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+
+ /* resetall */
+ netxen_rom_lock(adapter);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ netxen_rom_unlock(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ (n != 0xcafecafe) ||
+ netxen_rom_fast_read(adapter, 4, &n) != 0) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+ } else {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ !(n & 0x80000000)) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
+ }
+ offset = 1;
+ n &= ~0x80000000;
+ }
+
+ if (n >= 1024) {
+ printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
+ " initialized.\n", __func__, n);
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+	}
+
+ for (i = 0; i < n; i++) {
+
+ off = netxen_decode_crb_addr(buf[i].addr);
+ if (off == NETXEN_ADDR_ERROR) {
+ printk(KERN_ERR"CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += NETXEN_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == NETXEN_CAM_RAM(0x1fc))
+ continue;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (off == (NETXEN_CRB_I2C0 + 0x1c))
+ continue;
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
+ continue;
+ if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
+ !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+ buf[i].data = 0x1020;
+ /* skip the function enable register */
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
+ continue;
+ }
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+ init_delay = 1000;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+				/* hold xdma in reset also */
+				buf[i].data = NETXEN_NIC_XDMA_RESET;
+ }
+ }
+
+ NXWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* disable_peg_cache_all */
+
+ /* unreset_net_cache */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
+ }
+
+ /* p2dn replyCount */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+ /* disable_peg_cache 1 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+
+ /* peg_clr 0 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
+ /* peg_clr 1 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
+ /* peg_clr 2 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
+ /* peg_clr 3 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
+static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
+{
+ uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
+static int
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
+ __le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
+ 1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
+
+ ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
+ if (ptab_descr == NULL)
+ return -EINVAL;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ uint32_t flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ NX_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+
+ if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ mn_present = 0;
+ goto nomn;
+ }
+
+ return -EINVAL;
+}
+
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = nx_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+nx_get_bootld_offs(struct netxen_adapter *adapter)
+{
+ u32 offs = NETXEN_BOOTLD_START;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_BOOTLD,
+ NX_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+nx_get_fw_offs(struct netxen_adapter *adapter)
+{
+ u32 offs = NETXEN_IMAGE_START;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW,
+ NX_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+nx_get_fw_size(struct netxen_adapter *adapter)
+{
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW,
+ NX_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
+}
+
+static __le32
+nx_get_fw_version(struct netxen_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret = 0;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+
+ fw_data_desc = nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ break;
+ }
+ }
+
+ if (ret != 3)
+ return 0;
+
+ return major + (minor << 8) + (sub << 16);
+
+ } else
+ return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + NX_UNI_BIOS_VERSION_OFF));
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
+ (bios_ver >> 24);
+ } else
+ return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+}
+
+int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+ u8 fw_type;
+
+	/* NX2031 firmware doesn't support heartbeat */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ NXWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+	/* check if we have a newer or different firmware file */
+ if (adapter->fw) {
+
+ val = nx_get_fw_version(adapter);
+
+ version = NETXEN_DECODE_VERSION(val);
+
+ major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+ if (version > NETXEN_VERSION_CODE(major, minor, build))
+ return 1;
+
+ if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+ adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
+
+ val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+ fw_type = (val & 0x4) ?
+ NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+ if (adapter->fw_type != fw_type)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
+
+int
+netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
+{
+ u32 flash_fw_ver, min_fw_ver;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to read flash fw version\n");
+ return -EIO;
+ }
+
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+ min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
+ if (flash_fw_ver >= min_fw_ver)
+ return 0;
+
+	dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
+		"[4.0.505]. Please update firmware on flash\n",
+ _major(flash_fw_ver), _minor(flash_fw_ver),
+ _build(flash_fw_ver));
+ return -EINVAL;
+}
+
+static char *fw_name[] = {
+ NX_P2_MN_ROMIMAGE_NAME,
+ NX_P3_CT_ROMIMAGE_NAME,
+ NX_P3_MN_ROMIMAGE_NAME,
+ NX_UNIFIED_ROMIMAGE_NAME,
+ NX_FLASH_ROMIMAGE_NAME,
+};
+
+int
+netxen_load_firmware(struct netxen_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
+
+ if (fw) {
+ __le64 data;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)nx_get_bootld_offs(adapter);
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)nx_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)nx_get_fw_offs(adapter);
+ flashaddr = NETXEN_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (netxen_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (netxen_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ /* hi, lo are already in host endian byteorder */
+ data = (((u64)hi << 32) | lo);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
+ else {
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
+ }
+
+ return 0;
+}
+
+static int
+netxen_validate_firmware(struct netxen_adapter *adapter)
+{
+ __le32 val;
+ __le32 flash_fw_ver;
+ u32 file_fw_ver, min_ver, bios;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+ u32 crbinit_fix_fw;
+
+ if (fw_type == NX_UNIFIED_ROMIMAGE) {
+ if (netxen_nic_validate_unified_romimage(adapter))
+ return -EINVAL;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+ return -EINVAL;
+
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
+ }
+
+ val = nx_get_fw_version(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ min_ver = NETXEN_MIN_P3_FW_SUPP;
+ else
+ min_ver = NETXEN_VERSION_CODE(3, 4, 216);
+
+ file_fw_ver = NETXEN_DECODE_VERSION(val);
+
+ if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
+ (file_fw_ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver),
+ _build(file_fw_ver));
+ return -EINVAL;
+ }
+ val = nx_get_bios_version(adapter);
+ netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&pdev->dev, "Unable to read flash fw version\n");
+ return -EIO;
+ }
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+
+	/* New fw from file is not allowed if fw on flash is < 4.0.554 */
+ crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
+ if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
+ NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ dev_err(&pdev->dev, "Incompatibility detected between driver "
+ "and firmware version on flash. This configuration "
+ "is not recommended. Please update the firmware on "
+ "flash immediately\n");
+ return -EINVAL;
+ }
+
+	/* check if flashed firmware is newer, only for the no-MN and P2 cases */
+ if (!netxen_p3_has_mn(adapter) ||
+ NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (flash_fw_ver > file_fw_ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+ }
+
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+nx_get_next_fwtype(struct netxen_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case NX_UNKNOWN_ROMIMAGE:
+ fw_type = NX_UNIFIED_ROMIMAGE;
+ break;
+
+ case NX_UNIFIED_ROMIMAGE:
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+ fw_type = NX_FLASH_ROMIMAGE;
+ else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ fw_type = NX_P2_MN_ROMIMAGE;
+ else if (netxen_p3_has_mn(adapter))
+ fw_type = NX_P3_MN_ROMIMAGE;
+ else
+ fw_type = NX_P3_CT_ROMIMAGE;
+ break;
+
+ case NX_P3_MN_ROMIMAGE:
+ fw_type = NX_P3_CT_ROMIMAGE;
+ break;
+
+ case NX_P2_MN_ROMIMAGE:
+ case NX_P3_CT_ROMIMAGE:
+ default:
+ fw_type = NX_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
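+
+/*
+ * The resulting probe order, e.g. for a P3 part with memory-near (MN)
+ * present, is: UNIFIED -> P3_MN -> P3_CT -> FLASH.
+ */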
+
+static int
+netxen_p3_has_mn(struct netxen_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ /* NX2031 always had MN */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
+
+ capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
+ if (capability & NX_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+void netxen_request_firmware(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc = 0;
+
+ adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
+
+next:
+ nx_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = netxen_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+void
+netxen_release_firmware(struct netxen_adapter *adapter)
+{
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int netxen_init_dummy_dma(struct netxen_adapter *adapter)
+{
+ u64 addr;
+ u32 hi, lo;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr);
+ if (adapter->dummy_dma.addr == NULL) {
+ dev_err(&adapter->pdev->dev,
+ "ERROR: Could not allocate dummy DMA memory\n");
+ return -ENOMEM;
+ }
+
+ addr = (uint64_t) adapter->dummy_dma.phys_addr;
+ hi = (addr >> 32) & 0xffffffff;
+ lo = addr & 0xffffffff;
+
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
+
+ return 0;
+}
+
+/*
+ * NetXen DMA watchdog control:
+ *
+ * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive
+ * Bit 1 : disable_request => 1 req disable dma watchdog
+ * Bit 2 : enable_request => 1 req enable dma watchdog
+ * Bit 3-31 : unused
+ */
+void netxen_free_dummy_dma(struct netxen_adapter *adapter)
+{
+ int i = 100;
+ u32 ctrl;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return;
+
+ if (!adapter->dummy_dma.addr)
+ return;
+
+ ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+ if ((ctrl & 0x1) != 0) {
+ NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
+
+ while ((ctrl & 0x1) != 0) {
+
+ msleep(50);
+
+ ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+
+ if (--i == 0)
+ break;
+ }
+ }
+
+ if (i) {
+ pci_free_consistent(adapter->pdev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = NULL;
+ } else
+ dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
+}
+
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ if (pegtune_val)
+ return 0;
+
+ do {
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+netxen_receive_peg_ready(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ int retries = 2000;
+
+ do {
+ val = NXRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ if (!retries) {
+ printk(KERN_ERR "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+ int err;
+
+ err = netxen_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
+ return err;
+}
+
+static void
+netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
+ printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
+ netdev->name, cable_OUI, cable_len);
+ } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
+ printk(KERN_INFO "%s: unsupported cable length %d\n",
+ netdev->name, cable_len);
+ }
+
+ /* update link parameters */
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+
+ netxen_advert_link_change(adapter, link_status);
+}
+
+static void
+netxen_handle_fw_message(int desc_cnt, int index,
+ struct nx_host_sds_ring *sds_ring)
+{
+ nx_fw_msg_t msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = netxen_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ netxen_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+netxen_alloc_rx_skb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring,
+ struct netxen_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+ if (!buffer->skb)
+ return 1;
+
+ skb = buffer->skb;
+
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return 1;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = NETXEN_BUFFER_BUSY;
+
+ return 0;
+}
+
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
+ && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = NETXEN_BUFFER_FREE;
+ return skb;
+}
+
+static struct netxen_rx_buffer *
+netxen_process_rcv(struct netxen_adapter *adapter,
+ struct nx_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct nx_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = netxen_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = netxen_get_sts_totallength(sts_data0);
+ cksum = netxen_get_sts_status(sts_data0);
+ pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
+
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define TCP_HDR_SIZE 20
+#define TCP_TS_OPTION_SIZE 12
+#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
+
+static struct netxen_rx_buffer *
+netxen_process_lro(struct netxen_adapter *adapter,
+ struct nx_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct nx_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+ u8 vhdr_len = 0;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = netxen_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = netxen_get_lro_sts_timestamp(sts_data0);
+ lro_length = netxen_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = netxen_get_lro_sts_push_flag(sts_data0);
+ seq_number = netxen_get_lro_sts_seq_number(sts_data1);
+
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ vhdr_len = VLAN_HLEN;
+ iph = (struct iphdr *)(skb->data + vhdr_len);
+ th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ if (adapter->flags & NETXEN_FW_MSS_CAP)
+ skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define netxen_merge_rx_buffers(list, head) \
+	do { list_splice_tail_init(list, head); } while (0)
+
+int
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ struct list_head *cur;
+
+ struct status_desc *desc;
+ struct netxen_rx_buffer *rxbuf;
+
+ u32 consumer = sds_ring->consumer;
+
+ int count = 0;
+ u64 sts_data0, sts_data1;
+ int opcode, ring = 0, desc_cnt;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
+
+ opcode = netxen_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case NETXEN_NIC_RXPKT_DESC:
+ case NETXEN_OLD_RXPKT_DESC:
+ case NETXEN_NIC_SYN_OFFLOAD:
+ ring = netxen_get_sts_type(sts_data0);
+ rxbuf = netxen_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case NETXEN_NIC_LRO_DESC:
+ ring = netxen_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = netxen_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case NETXEN_NIC_RESPONSE_DESC:
+			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
+			/* fall through */
+		default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct nx_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct netxen_rx_buffer, list);
+ netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ netxen_merge_rx_buffers(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ netxen_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
+ }
+
+ return count;
+}
+
+/* Process Command status ring */
+int netxen_process_cmd_ring(struct netxen_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct netxen_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_skb_frag *frag;
+ int done = 0;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock_bh(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++; /* Get the next frag */
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ tx_ring->sw_consumer = sw_consumer;
+
+ if (count && netif_running(netdev)) {
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ }
+ /*
+ * If everything is freed up to consumer then check if the ring is full
+ * If the ring is full then check if more needs to be freed and
+ * schedule the call back again.
+ *
+ * This happens when there are 2 CPUs. One could be freeing and the
+ * other filling it. If the ring is full when we get out of here and
+ * the card has already interrupted the host then the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock_bh(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+void
+netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+ struct nx_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int producer, count = 0;
+ netxen_ctx_msg msg = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ NXWRIO(adapter, rds_ring->crb_rcv_producer,
+ (producer-1) & (rds_ring->num_desc-1));
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /*
+ * Write a doorbell msg to tell phanmon of change in
+ * receive ring producer
+ * Only for firmware version < 4.0.0
+ */
+ netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
+ netxen_set_msg_privid(msg);
+ netxen_set_msg_count(msg,
+ ((producer - 1) &
+ (rds_ring->num_desc - 1)));
+ netxen_set_msg_ctxid(msg, adapter->portnum);
+ netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
+ NXWRIO(adapter, DB_NORMALIZE(adapter,
+ NETXEN_RCV_PRODUCER_OFFSET), msg);
+ }
+ }
+}
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ NXWRIO(adapter, rds_ring->crb_rcv_producer,
+ (producer - 1) & (rds_ring->num_desc - 1));
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+void netxen_nic_clear_stats(struct netxen_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
new file mode 100644
index 000000000..6409a06bb
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -0,0 +1,3526 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+ NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode (NETXEN_PORT_MODE_AUTO_NEG_1G) */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int netxen_nic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+ struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+ work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id netxen_pci_tbl[] = {
+ ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
+ ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
+ ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
+ ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
+ ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
+ ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
+ ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
+ ENTRY(PCI_DEVICE_ID_NX3031),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
+
+static uint32_t crb_cmd_producer[4] = {
+ CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
+ CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
+};
+
+void
+netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring)
+{
+ NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
+}
+
+static uint32_t crb_cmd_consumer[4] = {
+ CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
+ CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
+};
+
+static inline void
+netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring)
+{
+ NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
+}
+
+static uint32_t msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
+
+static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
+}
+
+static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);
+
+ if (!NETXEN_IS_MSI_FAMILY(adapter))
+ NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
+}
+
+static int
+netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct nx_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return recv_ctx->sds_rings == NULL;
+}
+
+static void
+netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi,
+ netxen_nic_poll, NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void
+netxen_napi_del(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ netxen_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+netxen_napi_enable(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ netxen_nic_enable_int(sds_ring);
+ }
+}
+
+static void
+netxen_napi_disable(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_nic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
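+/*
+ * Initial DMA mask setup: P2 parts can address 35 bits (except on IA64,
+ * where the 32-bit default is kept), P3 parts 39 bits. The coherent
+ * mask stays at 32 bits on P2 and matches the streaming mask on P3.
+ */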
+static int nx_set_dma_mask(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ uint64_t mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
+ mask = DMA_BIT_MASK(32);
+ cmask = DMA_BIT_MASK(32);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+#ifndef CONFIG_IA64
+ mask = DMA_BIT_MASK(35);
+#endif
+ } else {
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+ }
+
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/*
+ * Update the addressable range if the firmware supports it:
+ * CRB_DMA_SHIFT advertises how many bits beyond 32 the card can
+ * address, so the DMA mask is widened to (32 + shift) bits, with the
+ * old masks restored on failure.
+ */
+static int
+nx_update_dma_mask(struct netxen_adapter *adapter)
+{
+ int change, shift, err;
+ uint64_t mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
+ shift = NXRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
+ change = 1;
+ else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32+shift);
+
+ err = pci_set_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ err = pci_set_consistent_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+ }
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ pci_set_dma_mask(pdev, old_mask);
+ pci_set_consistent_dma_mask(pdev, old_cmask);
+ return err;
+}
+
+static int
+netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
+{
+ u32 val, timeout;
+
+ if (first_boot == 0x55555555) {
+ /* This is the first boot after power up */
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ /* PCI bus master workaround */
+ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ if (!(first_boot & 0x4)) {
+ first_boot |= 0x4;
+ NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ }
+
+ /* This is the first boot after power up */
+ first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ if (first_boot != 0x80000f) {
+ /* clear the register for future unloads/loads */
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
+ return -EIO;
+ }
+
+ /* Start P2 boot loader */
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
+ timeout = 0;
+ do {
+ msleep(1);
+ val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+ if (++timeout > 5000)
+ return -EIO;
+
+ } while (val == NETXEN_BDINFO_MAGIC);
+ }
+ return 0;
+}
+
+static void netxen_set_port_mode(struct netxen_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
+ (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ data = NETXEN_PORT_MODE_802_3_AP;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_XG) {
+ data = NETXEN_PORT_MODE_XG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_1G;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_XG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else {
+ data = NETXEN_PORT_MODE_AUTO_NEG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != NETXEN_PORT_MODE_XG) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+ }
+ NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
+#define PCI_CAP_ID_GEN 0x10
+
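+/*
+ * Program PCIe "strap" workarounds: adjust the chicken3 bits for
+ * Gen1/Gen2 links and, when a c8/c9 value applies, write it into the
+ * capability space of all eight PCI functions. Only function 0 performs
+ * the per-function walk (the devfn & 0x07 check below).
+ */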
+static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
+{
+ u32 pdevfuncsave;
+ u32 c8c9value = 0;
+ u32 chicken = 0;
+ u32 control = 0;
+ int i, pos;
+ struct pci_dev *pdev;
+
+ pdev = adapter->pdev;
+
+ chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
+ /* clear chicken3.25:24 */
+ chicken &= 0xFCFFFFFF;
+	/*
+	 * Pick the c8/c9 strap value: Gen2 links get 0xF1000; Gen1 links
+	 * on B0 silicon get 0xF1020; other Gen1 parts are left untouched.
+	 */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
+ if (pos == 0xC0) {
+ pci_read_config_dword(pdev, pos + 0x10, &control);
+ if ((control & 0x000F0000) != 0x00020000) {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ }
+ dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
+ c8c9value = 0xF1000;
+ } else {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
+ if (adapter->ahw.revision_id == NX_P3_B0)
+ c8c9value = 0xF1020;
+ else
+ c8c9value = 0;
+ }
+
+ NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);
+
+ if (!c8c9value)
+ return;
+
+ pdevfuncsave = pdev->devfn;
+ if (pdevfuncsave & 0x07)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_write_config_dword(pdev, pos + 8, c8c9value);
+ pdev->devfn++;
+ }
+ pdev->devfn = pdevfuncsave;
+}
+
+static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+
+ if (pdev->msix_cap) {
+ pci_read_config_dword(pdev, pdev->msix_cap, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pdev->msix_cap, control);
+ }
+}
+
+static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+netxen_read_mac_addr(struct netxen_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ } else {
+ if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ }
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
+
+ return 0;
+}
+
+static int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ netxen_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ adapter->macaddr_set(adapter, addr->sa_data);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ netxen_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static void netxen_set_multicast_list(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ adapter->set_multi(dev);
+}
+
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_RXCSUM)) {
+ netdev_info(dev, "disabling LRO as RXCSUM is off\n");
+
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+static int netxen_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int hw_lro;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
+ : NETXEN_NIC_LRO_DISABLED;
+
+ if (netxen_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct net_device_ops netxen_netdev_ops = {
+ .ndo_open = netxen_nic_open,
+ .ndo_stop = netxen_nic_close,
+ .ndo_start_xmit = netxen_nic_xmit_frame,
+ .ndo_get_stats64 = netxen_nic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = netxen_set_multicast_list,
+ .ndo_set_mac_address = netxen_nic_set_mac,
+ .ndo_change_mtu = netxen_nic_change_mtu,
+ .ndo_tx_timeout = netxen_tx_timeout,
+ .ndo_fix_features = netxen_fix_features,
+ .ndo_set_features = netxen_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netxen_nic_poll_controller,
+#endif
+};
+
+static inline bool netxen_function_zero(struct pci_dev *pdev)
+{
+	return PCI_FUNC(pdev->devfn) == 0;
+}
+
+static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
+ u32 mode)
+{
+ NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
+}
+
+static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
+{
+ return NXRD32(adapter, NETXEN_INTR_MODE_REG);
+}
+
+static void
+netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
+{
+ struct netxen_legacy_intr_set *legacy_intrp;
+ u32 tgt_status_reg, int_state_reg;
+
+ if (adapter->ahw.revision_id >= NX_P3_B0)
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+ else
+ legacy_intrp = &legacy_intr[0];
+
+ tgt_status_reg = legacy_intrp->tgt_status_reg;
+ int_state_reg = ISR_INT_STATE_REG;
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
+ adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->pci_int_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->pci_int_reg);
+ adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ if (adapter->ahw.revision_id >= NX_P3_B1)
+ adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+ int_state_reg);
+ else
+ adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+ CRB_INT_VECTOR);
+}
+
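+/*
+ * Try MSI-X first (which enables RSS with one SDS ring per vector),
+ * then fall back to plain MSI. Returns -EIO when neither could be
+ * enabled, so the caller can drop to legacy INTx.
+ */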
+static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
+ int num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 value;
+ int err;
+
+ if (adapter->msix_supported) {
+ netxen_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
+ adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
+ netxen_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return 0;
+ }
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ value = msi_tgt_status[adapter->ahw.pci_func];
+ adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
+ adapter->msix_entries[0].vector = pdev->irq;
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
+ return -EIO;
+}
+
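+/*
+ * Interrupt mode is negotiated chip-wide: function 0 picks MSI-X/MSI or
+ * INTx and records the choice in NETXEN_INTR_MODE_REG; the remaining
+ * functions read that register and must follow the same mode, since
+ * mixing MSI-X/MSI with INTx on one adapter is not supported.
+ */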
+static int netxen_setup_intr(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int num_msix;
+
+ if (adapter->rss_supported)
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+ adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+ netxen_initialize_interrupt_registers(adapter);
+ netxen_set_msix_bit(pdev, 0);
+
+ if (netxen_function_zero(pdev)) {
+ if (!netxen_setup_msi_interrupts(adapter, num_msix))
+ netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
+ else
+ netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
+ } else {
+ if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
+ netxen_setup_msi_interrupts(adapter, num_msix)) {
+ dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
+ return -EIO;
+ }
+ }
+
+ if (!NETXEN_IS_MSI_FAMILY(adapter)) {
+ adapter->msix_entries[0].vector = pdev->irq;
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ }
+ return 0;
+}
+
+static void
+netxen_teardown_intr(struct netxen_adapter *adapter)
+{
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+netxen_cleanup_pci_map(struct netxen_adapter *adapter)
+{
+ if (adapter->ahw.db_base != NULL)
+ iounmap(adapter->ahw.db_base);
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+ if (adapter->ahw.pci_base1 != NULL)
+ iounmap(adapter->ahw.pci_base1);
+ if (adapter->ahw.pci_base2 != NULL)
+ iounmap(adapter->ahw.pci_base2);
+}
+
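+/*
+ * Map BAR 0, whose size identifies the chip generation: the 128MB and
+ * 32MB layouts are split into fixed page groups, while the 2MB layout
+ * is mapped in one piece. P2 parts additionally map the doorbell
+ * BAR 4; P3 skips it.
+ */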
+static int
+netxen_setup_pci_map(struct netxen_adapter *adapter)
+{
+ void __iomem *db_ptr = NULL;
+
+ resource_size_t mem_base, db_base;
+ unsigned long mem_len, db_len = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+ struct netxen_hardware_context *ahw = &adapter->ahw;
+
+ int err = 0;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ /* 128 Meg of memory */
+ if (mem_len == NETXEN_PCI_128MB_SIZE) {
+
+ ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+ ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+ SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+ THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+ ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
+ } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
+
+ ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+ SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
+
+ ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+ if (ahw->pci_base0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ ahw->pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ netxen_setup_hwops(adapter);
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+ adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+ NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+ NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ goto skip_doorbell;
+
+ db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
+ db_len = pci_resource_len(pdev, 4);
+
+ if (db_len == 0) {
+ printk(KERN_ERR "%s: doorbell is disabled\n",
+ netxen_nic_driver_name);
+ err = -EIO;
+ goto err_out;
+ }
+
+ db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
+ if (!db_ptr) {
+		printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
+				netxen_nic_driver_name);
+ err = -EIO;
+ goto err_out;
+ }
+
+skip_doorbell:
+ adapter->ahw.db_base = db_ptr;
+ adapter->ahw.db_len = db_len;
+ return 0;
+
+err_out:
+ netxen_cleanup_pci_map(adapter);
+ return err;
+}
+
+static void
+netxen_check_options(struct netxen_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
+ char brd_name[NETXEN_MAX_SHORT_NAME];
+ char serial_num[32];
+ int i, offset, val, err;
+ __le32 *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (__le32 *)&serial_num;
+ offset = NX_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+ prev_fw_version = adapter->fw_version;
+ adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ /* Get FW Mini Coredump template and store it */
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (adapter->mdump.md_template == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+ err = netxen_setup_minidump(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to setup minidump rcode = %d\n", err);
+ }
+ }
+
+ if (adapter->portnum == 0) {
+ if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
+ brd_name))
+ strcpy(serial_num, "Unknown");
+
+ pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, serial_num, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ i = NXRD32(adapter, NETXEN_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+ }
+
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
+ NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
+ adapter->max_rds_rings = 3;
+ } else {
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+ }
+}
+
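+/*
+ * Bring the firmware up: set the DMA mask, arbitrate with the other PCI
+ * functions over who boots the card, run the first-boot hardware init
+ * when needed, load the firmware image, and hand-shake with it before
+ * re-reading the options it advertises.
+ */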
+static int
+netxen_start_firmware(struct netxen_adapter *adapter)
+{
+ int val, err, first_boot;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* required for NX2031 dummy dma */
+ err = nx_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ err = netxen_can_start_firmware(adapter);
+
+ if (err < 0)
+ return err;
+
+ if (!err)
+ goto wait_init;
+
+ first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+ err = netxen_check_hw_init(adapter, first_boot);
+ if (err) {
+ dev_err(&pdev->dev, "error in init HW init sequence\n");
+ return err;
+ }
+
+ netxen_request_firmware(adapter);
+
+ err = netxen_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto pcie_strap_init;
+
+ if (first_boot != 0x55555555) {
+ NXWR32(adapter, CRB_CMDPEG_STATE, 0);
+ netxen_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
+ NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_set_port_mode(adapter);
+
+ err = netxen_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ netxen_release_firmware(adapter);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+
+ /* Initialize multicast addr pool owners */
+ val = 0x7654;
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
+ val |= 0x0f000000;
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ }
+
+ err = netxen_init_dummy_dma(adapter);
+ if (err)
+ goto err_out;
+
+ /*
+ * Tell the hardware our version number.
+ */
+ val = (_NETXEN_NIC_LINUX_MAJOR << 16)
+ | ((_NETXEN_NIC_LINUX_MINOR << 8))
+ | (_NETXEN_NIC_LINUX_SUBVERSION);
+ NXWR32(adapter, CRB_DRIVER_VERSION, val);
+
+pcie_strap_init:
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_pcie_strap_init(adapter);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+ if (err) {
+ netxen_free_dummy_dma(adapter);
+ goto err_out;
+ }
+
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
+
+ nx_update_dma_mask(adapter);
+
+ netxen_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ netxen_release_firmware(adapter);
+ return err;
+}
+
+static int
+netxen_nic_request_irq(struct netxen_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct nx_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ handler = netxen_msix_intr;
+ else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ handler = netxen_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = netxen_intr;
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+netxen_nic_free_irq(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
+{
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+/* with rtnl_lock */
+static int
+__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ err = adapter->init_port(adapter, adapter->physical_port);
+ if (err) {
+ printk(KERN_ERR "%s: Failed to initialize port %d\n",
+ netxen_nic_driver_name, adapter->portnum);
+ return err;
+ }
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ adapter->macaddr_set(adapter, adapter->mac_addr);
+
+ adapter->set_multi(netdev);
+ adapter->set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ netxen_config_rss(adapter, 1);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_config_intr_coalesce(adapter);
+
+ if (netdev->features & NETIF_F_LRO)
+ netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
+
+ netxen_napi_enable(adapter);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 1);
+ else
+ netxen_nic_set_link_parameters(adapter);
+
+ set_bit(__NX_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Used during resume and by the firmware recovery path. */
+
+static inline int
+netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __netxen_nic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+/* with rtnl_lock */
+static void
+__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 0);
+
+ if (adapter->stop_port)
+ adapter->stop_port(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_p3_free_mac_list(adapter);
+
+ adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
+
+ netxen_napi_disable(adapter);
+
+ netxen_release_tx_buffers(adapter);
+}
+
+/* Used during suspend and by the firmware recovery path. */
+
+static inline void
+netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
+static int
+netxen_nic_attach(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ u32 capab2;
+
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = netxen_init_firmware(adapter);
+ if (err)
+ return err;
+
+ adapter->flags &= ~NETXEN_FW_MSS_CAP;
+ if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= NETXEN_FW_MSS_CAP;
+ }
+
+ err = netxen_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = netxen_alloc_sw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting sw resources\n",
+ netdev->name);
+ return err;
+ }
+
+ err = netxen_alloc_hw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting hw resources\n",
+ netdev->name);
+ goto err_out_free_sw;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ tx_ring = adapter->tx_ring;
+ tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+ crb_cmd_producer[adapter->portnum]);
+ tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
+ crb_cmd_consumer[adapter->portnum]);
+
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+ netxen_nic_update_cmd_consumer(adapter, tx_ring);
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ netxen_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = netxen_nic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
+ netdev->name);
+ goto err_out_free_rxbuf;
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_nic_init_coalesce_defaults(adapter);
+
+ netxen_create_sysfs_entries(adapter);
+
+ adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ netxen_release_rx_buffers(adapter);
+ netxen_free_hw_resources(adapter);
+err_out_free_sw:
+ netxen_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+netxen_nic_detach(struct netxen_adapter *adapter)
+{
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ netxen_remove_sysfs_entries(adapter);
+
+ netxen_free_hw_resources(adapter);
+ netxen_release_rx_buffers(adapter);
+ netxen_nic_free_irq(adapter);
+ netxen_napi_del(adapter);
+ netxen_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+int
+netxen_nic_reset_context(struct netxen_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+
+ netxen_nic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (!err)
+ err = __netxen_nic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__NX_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+netxen_setup_netdev(struct netxen_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err = 0;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->mc_enabled = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ adapter->max_mc_count = 38;
+ else
+ adapter->max_mc_count = 16;
+
+ netdev->netdev_ops = &netxen_netdev_ops;
+ netdev->watchdog_timeo = 5*HZ;
+
+ netxen_nic_change_mtu(netdev, netdev->mtu);
+
+ netdev->ethtool_ops = &netxen_nic_ethtool_ops;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ netdev->vlan_features |= netdev->hw_features;
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->features |= netdev->hw_features;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+
+ if (netxen_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+ u32 temp;
+
+ /* Print ULA info only once for an adapter */
+ if (adapter->portnum != 0)
+ return;
+
+ temp = NXRD32(adapter, NETXEN_ULA_KEY);
+ switch (temp) {
+ case NETXEN_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "ULA adapter");
+ break;
+ case NETXEN_NON_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "non ULA adapter");
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_PCIEAER
+static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *root = pdev->bus->self;
+ u32 aer_pos;
+
+ /* root bus? */
+ if (!root)
+ return;
+
+ if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+ adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+ return;
+
+ if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
+ return;
+
+ aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
+ if (!aer_pos)
+ return;
+
+ pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
+}
+#endif
+
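+/*
+ * PCI probe: map the BARs, read the board configuration, start the
+ * firmware (scheduling a reset first if a stale reference count
+ * suggests an unclean unload), set up interrupts, and register the net
+ * device. Errors unwind in reverse order through the labels at the
+ * bottom.
+ */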
+static int
+netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct netxen_adapter *adapter = NULL;
+ int i = 0, err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+ u32 val;
+
+ if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+ pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n",
+ module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
+ return -ENODEV;
+ }
+
+ if ((err = pci_enable_device(pdev)))
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
+ goto err_out_disable_pdev;
+
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct netxen_adapter));
+	if (!netdev) {
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ spin_lock_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+ INIT_LIST_HEAD(&adapter->ip_list);
+
+ err = netxen_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = netxen_nic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+#ifdef CONFIG_PCIEAER
+ netxen_mask_aer_correctable(adapter);
+#endif
+
+ /* Mezz cards have PCI function 0,2,3 enabled */
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ if (pci_func_id >= 2)
+ adapter->portnum = pci_func_id - 2;
+ break;
+ default:
+ break;
+ }
+
+ err = netxen_check_flash_fw_compatibility(adapter);
+ if (err)
+ goto err_out_iounmap;
+
+ if (adapter->portnum == 0) {
+ val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ if (val != 0xffffffff && val != 0) {
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+ adapter->need_fw_reset = 1;
+ }
+ }
+
+ err = netxen_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /*
+ * See if the firmware gave us a virtual-physical port mapping.
+ */
+ adapter->physical_port = adapter->portnum;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ i = NXRD32(adapter, CRB_V2P(adapter->portnum));
+ if (i != 0x55555555)
+ adapter->physical_port = i;
+ }
+
+ netxen_nic_clear_stats(adapter);
+
+ err = netxen_setup_intr(adapter);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to setup interrupts, error = %d\n", err);
+ goto err_out_disable_msi;
+ }
+
+ netxen_read_ula_info(adapter);
+
+ err = netxen_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case NETXEN_NIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case NETXEN_NIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ netxen_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ netxen_teardown_intr(adapter);
+
+ netxen_free_dummy_dma(adapter);
+
+err_out_decr_ref:
+ nx_decr_dev_ref_cnt(adapter);
+
+err_out_iounmap:
+ netxen_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void netxen_cleanup_minidump(struct netxen_adapter *adapter)
+{
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+
+ if (adapter->mdump.md_capture_buff) {
+ vfree(adapter->mdump.md_capture_buff);
+ adapter->mdump.md_capture_buff = NULL;
+ }
+}
+
+static void netxen_nic_remove(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ netxen_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ netxen_free_ip_list(adapter, false);
+ netxen_nic_detach(adapter);
+
+ nx_decr_dev_ref_cnt(adapter);
+
+ if (adapter->portnum == 0)
+ netxen_free_dummy_dma(adapter);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+
+ netxen_teardown_intr(adapter);
+ netxen_set_interrupt_mode(adapter, 0);
+ netxen_remove_diag_entries(adapter);
+
+ netxen_cleanup_pci_map(adapter);
+
+ netxen_release_firmware(adapter);
+
+ if (NX_IS_REVISION_P3(pdev->revision)) {
+ netxen_cleanup_minidump(adapter);
+ pci_disable_pcie_error_reporting(pdev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ free_netdev(netdev);
+}
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ netxen_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ netxen_nic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ netxen_nic_detach(adapter);
+
+ if (adapter->portnum == 0)
+ netxen_free_dummy_dma(adapter);
+
+ nx_decr_dev_ref_cnt(adapter);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int netxen_nic_attach_func(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = netxen_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = netxen_nic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ netxen_nic_detach(adapter);
+err_out:
+ nx_decr_dev_ref_cnt(adapter);
+ return err;
+}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ int retval;
+
+ netxen_nic_detach_func(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+ return netxen_nic_attach_func(pdev);
+}
+#endif
+
+static int netxen_nic_open(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = netxen_nic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __netxen_nic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ netxen_nic_detach(adapter);
+ return err;
+}
+
+/*
+ * netxen_nic_close - disable the network interface (.ndo_stop entry point)
+ */
+static int netxen_nic_close(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ __netxen_nic_down(adapter, netdev);
+ return 0;
+}
+
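+/*
+ * Prepare the first descriptor for checksum offload or TSO. For TSO the
+ * firmware needs a copy of the MAC/IP/TCP headers inline in the command
+ * ring, so the header bytes (plus a rebuilt VLAN header when the tag is
+ * out of band) are copied into the descriptors that follow.
+ */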
+static void
+netxen_tso_check(struct net_device *netdev,
+ struct nx_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (skb_vlan_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ vid = skb_vlan_tag_get(skb);
+ netxen_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+			else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+			else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ netxen_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+}
+
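+/*
+ * DMA-map the skb head and each page fragment into pbuf->frag_array.
+ * On a mapping failure everything mapped so far is unwound, so the
+ * caller can simply drop the packet.
+ */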
+static int
+netxen_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
+{
+ struct netxen_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = skb_frag_size(frag);
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
+
+out_err:
+ return -ENOMEM;
+}
+
+static inline void
+netxen_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
+static netdev_tx_t
+netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_cmd_buffer *pbuf;
+ struct netxen_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+ int delta = 0;
+ struct skb_frag_struct *frag;
+
+ u32 producer;
+ int frag_count, no_of_desc;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+	/* up to 14 frags are supported for a normal packet,
+	 * 32 for a TSO packet
+	 */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += skb_frag_size(frag);
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+	/* each cmd descriptor carries up to 4 fragments; round up */
+ no_of_desc = (frag_count + 3) >> 2;
+
+ if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_stop_queue(netdev);
+ smp_mb();
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (netxen_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ netxen_clear_cmddesc((u64 *)hwdesc);
+
+ netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
+ netxen_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+			/* move to the next descriptor */
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ netxen_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ netxen_tso_check(netdev, tx_ring, first_desc, skb);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = NXRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = nx_get_temp_state(temp);
+ temp_val = nx_get_temp_val(temp);
+
+ if (temp_state == NX_TEMP_PANIC) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ netdev->name, temp_val);
+ rv = 1;
+ } else if (temp_state == NX_TEMP_WARN) {
+ if (adapter->temp == NX_TEMP_NORMAL) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ netdev->name, temp_val);
+ }
+ } else {
+ if (adapter->temp == NX_TEMP_WARN) {
+ printk(KERN_INFO
+ "%s: Device temperature is now %d degrees C"
+ " in normal range.\n", netdev->name,
+ temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is down\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ adapter->link_changed = !adapter->has_link_events;
+ } else if (!adapter->ahw.linkup && linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is up\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ adapter->link_changed = !adapter->has_link_events;
+ }
+}
+
+static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ u32 val, port, linkup;
+
+ port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ linkup = (val == XG_LINK_UP_P3);
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
+ }
+
+ netxen_advert_link_change(adapter, linkup);
+}
+
+static void netxen_tx_timeout(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__NX_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void netxen_tx_timeout_task(struct work_struct *work)
+{
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ rtnl_lock();
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /* try to scrub interrupt */
+ netxen_napi_disable(adapter);
+
+ netxen_napi_enable(adapter);
+
+ netif_wake_queue(adapter->netdev);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+ } else {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ if (netxen_nic_reset_context(adapter)) {
+ rtnl_unlock();
+ goto request_reset;
+ }
+ }
+ adapter->netdev->trans_start = jiffies;
+ rtnl_unlock();
+ return;
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t netxen_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+ struct netxen_adapter *adapter = sds_ring->adapter;
+ u32 status = 0;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ } else {
+ unsigned long our_int = 0;
+
+ our_int = readl(adapter->crb_int_state_reg);
+
+ /* not our interrupt */
+ if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
+ return IRQ_NONE;
+
+ /* claim interrupt */
+ writel((our_int & 0xffffffff), adapter->crb_int_state_reg);
+
+ /* clear interrupt */
+ netxen_nic_disable_int(sds_ring);
+ }
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msi_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static int netxen_nic_poll(struct napi_struct *napi, int budget)
+{
+ struct nx_host_sds_ring *sds_ring =
+ container_of(napi, struct nx_host_sds_ring, napi);
+
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = netxen_process_cmd_ring(adapter);
+
+ work_done = netxen_process_rcv_ring(sds_ring, budget);
+
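+	/* If Tx completions are still pending, claim the full budget
+	 * so NAPI keeps polling instead of re-enabling interrupts.
+	 */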
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__NX_DEV_UP, &adapter->state))
+ netxen_nic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ disable_irq(adapter->irq);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_intr(adapter->irq, sds_ring);
+ }
+ enable_irq(adapter->irq);
+}
+#endif
+
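+/*
+ * The device reference count lives in a CRB register shared by all
+ * PCI functions on the adapter; netxen_api_lock() serializes access.
+ */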
+static int
+nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+	int count;
+
+	if (netxen_api_lock(adapter))
+ return -EIO;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+ netxen_api_unlock(adapter);
+ return count;
+}
+
+static int
+nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+	int count, state;
+
+	if (netxen_api_lock(adapter))
+ return -EIO;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ WARN_ON(count == 0);
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (count == 0 && state != NX_DEV_FAILED)
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+
+ netxen_api_unlock(adapter);
+ return count;
+}
+
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+int
+nx_dev_request_reset(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ adapter->flags |= NETXEN_FW_RESET_OWNER;
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+
+ return ret;
+}
+
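+/*
+ * Only the first function to come up (reference count zero) starts
+ * the firmware; it marks the device INITIALIZING and takes a
+ * reference before releasing the lock.
+ */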
+static int
+netxen_can_start_firmware(struct netxen_adapter *adapter)
+{
+ int count;
+ int can_start = 0;
+
+ if (netxen_api_lock(adapter)) {
+ nx_incr_dev_ref_cnt(adapter);
+ return -1;
+ }
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+ if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
+ count = 0;
+
+ if (count == 0) {
+ can_start = 1;
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
+ }
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+ netxen_api_unlock(adapter);
+
+ return can_start;
+}
+
+static void
+netxen_schedule_work(struct netxen_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, delay);
+}
+
+static void
+netxen_cancel_fw_work(struct netxen_adapter *adapter)
+{
+ while (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+netxen_attach_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = netxen_nic_up(adapter, netdev);
+ if (err) {
+ netxen_nic_detach(adapter);
+ goto done;
+ }
+
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__NX_RESETTING, &adapter->state);
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static void
+netxen_fwinit_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+	int dev_state;
+	int count;
+
+	dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (adapter->flags & NETXEN_FW_RESET_OWNER) {
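+		/* As reset owner, once we hold the last reference,
+		 * optionally capture a minidump, then drop ownership and
+		 * force the device COLD so it can be reinitialized.
+		 */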
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ WARN_ON(count == 0);
+ if (count == 1) {
+ if (adapter->mdump.md_enabled) {
+ rtnl_lock();
+ netxen_dump_fw(adapter);
+ rtnl_unlock();
+ }
+ adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+ if (netxen_api_lock(adapter)) {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ NXWR32(adapter, NX_CRB_DEV_STATE,
+ NX_DEV_FAILED);
+ return;
+ }
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+ dev_state = NX_DEV_COLD;
+ netxen_api_unlock(adapter);
+ }
+ }
+
+ switch (dev_state) {
+ case NX_DEV_COLD:
+ case NX_DEV_READY:
+ if (!netxen_start_firmware(adapter)) {
+ netxen_schedule_work(adapter, netxen_attach_work, 0);
+ return;
+ }
+ break;
+
+ case NX_DEV_NEED_RESET:
+ case NX_DEV_INITALIZING:
+ netxen_schedule_work(adapter,
+ netxen_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+
+ case NX_DEV_FAILED:
+ default:
+ nx_incr_dev_ref_cnt(adapter);
+ break;
+ }
+
+ if (netxen_api_lock(adapter)) {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ return;
+ }
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
+ netxen_api_unlock(adapter);
+	dev_err(&adapter->pdev->dev, "%s: Device initialization failed\n",
+ adapter->netdev->name);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static void
+netxen_detach_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int ref_cnt = 0, delay;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ netxen_nic_down(adapter, netdev);
+
+ rtnl_lock();
+ netxen_nic_detach(adapter);
+ rtnl_unlock();
+
+ status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+
+ if (status & NX_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == NX_TEMP_PANIC)
+ goto err_ret;
+
+ if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
+ ref_cnt = nx_decr_dev_ref_cnt(adapter);
+
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
+ delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
+
+ adapter->fw_wait_cnt = 0;
+ netxen_schedule_work(adapter, netxen_fwinit_work, delay);
+
+ return;
+
+err_ret:
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int
+netxen_check_health(struct netxen_adapter *adapter)
+{
+ u32 state, heartbit;
+ u32 peg_status;
+ struct net_device *netdev = adapter->netdev;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
+ if (netxen_nic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ if (nx_dev_request_reset(adapter))
+ return 0;
+ goto detach;
+ }
+
+	/* NX_DEV_NEED_RESET can be set in two cases:
+	 * 1. Tx timeout  2. FW hang
+	 * A request to destroy the context is needed only for a Tx
+	 * timeout; it is not required for a FW hang.
+	 */
+ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
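+	/* Firmware heartbeat: the peg alive counter must keep moving.
+	 * A counter stalled for FW_FAIL_THRESH polls means a FW hang.
+	 */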
+ heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
+ clear_bit(__NX_FW_ATTACHED, &adapter->state);
+
+ dev_err(&netdev->dev, "firmware hang detected\n");
+ peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ peg_status,
+ NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
+ if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
+detach:
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__NX_RESETTING, &adapter->state))
+ netxen_schedule_work(adapter, netxen_detach_work, 0);
+ return 1;
+}
+
+static void
+netxen_fw_poll_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+
+ if (test_bit(__NX_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (test_bit(__NX_DEV_UP, &adapter->state) &&
+ !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
+ if (!adapter->has_link_events) {
+
+ netxen_nic_handle_phy_intr(adapter);
+
+ if (adapter->link_changed)
+ netxen_nic_set_link_parameters(adapter);
+ }
+ }
+
+ if (netxen_check_health(adapter))
+ return;
+
+reschedule:
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+netxen_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct net_device *net = to_net_dev(dev);
+ struct netxen_adapter *adapter = netdev_priv(net);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ goto err_out;
+
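+	/* Parse in base 2; any non-zero value enables bridged mode. */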
+ if (kstrtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!netxen_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+netxen_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ struct netxen_adapter *adapter;
+ int bridged_mode = 0;
+
+ adapter = netdev_priv(net);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = netxen_show_bridged_mode,
+ .store = netxen_store_bridged_mode,
+};
+
+static ssize_t
+netxen_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (kstrtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+netxen_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = netxen_show_diag_mode,
+ .store = netxen_store_diag_mode,
+};
+
+static int
+netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
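+	/* CRB registers use 4-byte accesses; the CAMQM window below
+	 * PCI CRB space (P3 only) uses 8-byte accesses. The offset
+	 * must be naturally aligned to the access size.
+	 */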
+ if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ return -EIO;
+
+ if (offset < NETXEN_PCI_CRBSPACE) {
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = netxen_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = NXRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+
+ return size;
+}
+
+static ssize_t
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = netxen_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ memcpy(&qmdata, buf, size);
+ netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ NXWR32(adapter, offset, data);
+ }
+
+ return size;
+}
+
+static int
+netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = netxen_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (adapter->pci_mem_read(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = netxen_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (adapter->pci_mem_write(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = netxen_sysfs_read_crb,
+ .write = netxen_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = netxen_sysfs_read_mem,
+ .write = netxen_sysfs_write_mem,
+};
+
+static ssize_t
+netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_dimm_cfg dimm;
+ u8 dw, rows, cols, banks, ranks;
+ u32 val;
+
+ if (size < attr->size) {
+ netdev_err(netdev, "Invalid size\n");
+ return -EINVAL;
+ }
+
+ memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
+ val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);
+
+ /* Checks if DIMM info is valid. */
+ if (val & NETXEN_DIMM_VALID_FLAG) {
+ netdev_err(netdev, "Invalid DIMM flag\n");
+ dimm.presence = 0xff;
+ goto out;
+ }
+
+ rows = NETXEN_DIMM_NUMROWS(val);
+ cols = NETXEN_DIMM_NUMCOLS(val);
+ ranks = NETXEN_DIMM_NUMRANKS(val);
+ banks = NETXEN_DIMM_NUMBANKS(val);
+ dw = NETXEN_DIMM_DATAWIDTH(val);
+
+ dimm.presence = (val & NETXEN_DIMM_PRESENT);
+
+ /* Checks if DIMM info is present. */
+ if (!dimm.presence) {
+ netdev_err(netdev, "DIMM not present\n");
+ goto out;
+ }
+
+ dimm.dimm_type = NETXEN_DIMM_TYPE(val);
+
+ switch (dimm.dimm_type) {
+ case NETXEN_DIMM_TYPE_RDIMM:
+ case NETXEN_DIMM_TYPE_UDIMM:
+ case NETXEN_DIMM_TYPE_SO_DIMM:
+ case NETXEN_DIMM_TYPE_Micro_DIMM:
+ case NETXEN_DIMM_TYPE_Mini_RDIMM:
+ case NETXEN_DIMM_TYPE_Mini_UDIMM:
+ break;
+ default:
+ netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
+ goto out;
+ }
+
+ if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
+ dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
+ else
+ dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);
+
+ if (val & NETXEN_DIMM_SIZE) {
+ dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
+ goto out;
+ }
+
+ if (!rows) {
+		netdev_err(netdev, "Invalid number of rows %x\n", rows);
+ goto out;
+ }
+
+ if (!cols) {
+		netdev_err(netdev, "Invalid number of columns %x\n", cols);
+ goto out;
+ }
+
+ if (!banks) {
+		netdev_err(netdev, "Invalid number of banks %x\n", banks);
+ goto out;
+ }
+
+ ranks += 1;
+
+ switch (dw) {
+ case 0x0:
+ dw = 32;
+ break;
+ case 0x1:
+ dw = 33;
+ break;
+ case 0x2:
+ dw = 36;
+ break;
+ case 0x3:
+ dw = 64;
+ break;
+ case 0x4:
+ dw = 72;
+ break;
+ case 0x5:
+ dw = 80;
+ break;
+ case 0x6:
+ dw = 128;
+ break;
+ case 0x7:
+ dw = 144;
+ break;
+ default:
+ netdev_err(netdev, "Invalid data-width %x\n", dw);
+ goto out;
+ }
+
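+	/* Size in bytes = 2^rows * 2^cols * width(bits) * banks *
+	 * ranks / 8.
+	 */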
+ dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
+ /* Size returned in MB. */
+ dimm.size = (dimm.size) / 0x100000;
+out:
+ memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
+ return sizeof(struct netxen_dimm_cfg);
+
+}
+
+static struct bin_attribute bin_attr_dimm = {
+ .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
+ .size = sizeof(struct netxen_dimm_cfg),
+ .read = netxen_sysfs_read_dimm,
+};
+
+
+static void
+netxen_create_sysfs_entries(struct netxen_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
+ /* bridged_mode control */
+ if (device_create_file(dev, &dev_attr_bridged_mode)) {
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+ }
+ }
+}
+
+static void
+netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+netxen_create_diag_entries(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev;
+
+ dev = &pdev->dev;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_dimm))
+ dev_info(dev, "failed to create dimm sysfs entry\n");
+}
+
+
+static void
+netxen_remove_diag_entries(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+ device_remove_bin_file(dev, &bin_attr_dimm);
+}
+
+#ifdef CONFIG_INET
+
+#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
+
+static int
+netxen_destip_supported(struct netxen_adapter *adapter)
+{
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
+static void
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
+{
+ struct nx_ip_list *cur, *tmp_cur;
+
+ list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+ if (master) {
+ if (cur->master) {
+ netxen_config_ipaddr(adapter, cur->ip_addr,
+ NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ } else {
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ }
+}
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
+ struct in_ifaddr *ifa, unsigned long event)
+{
+ struct net_device *dev;
+ struct nx_ip_list *cur, *tmp_cur;
+ struct list_head *head;
+ bool ret = false;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+ if (dev == NULL)
+ goto out;
+
+ switch (event) {
+ case NX_IP_UP:
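+		/* Skip duplicates: program the address into the NIC
+		 * only the first time it is seen.
+		 */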
+ list_for_each(head, &adapter->ip_list) {
+ cur = list_entry(head, struct nx_ip_list, list);
+
+ if (cur->ip_addr == ifa->ifa_address)
+ goto out;
+ }
+
+ cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
+ if (cur == NULL)
+ goto out;
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ dev = vlan_dev_real_dev(dev);
+ cur->master = !!netif_is_bond_master(dev);
+ cur->ip_addr = ifa->ifa_address;
+ list_add_tail(&cur->list, &adapter->ip_list);
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ ret = true;
+ break;
+ case NX_IP_DOWN:
+ list_for_each_entry_safe(cur, tmp_cur,
+ &adapter->ip_list, list) {
+ if (cur->ip_addr == ifa->ifa_address) {
+ list_del(&cur->list);
+ kfree(cur);
+ netxen_config_ipaddr(adapter, ifa->ifa_address,
+ NX_IP_DOWN);
+ ret = true;
+ break;
+ }
+ }
+ }
+out:
+ return ret;
+}
+
+static void
+netxen_config_indev_addr(struct netxen_adapter *adapter,
+ struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+
+ if (!netxen_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ netxen_list_config_ip(adapter, ifa, NX_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+}
+
+static void
+netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
+
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_ip_list *pos, *tmp_pos;
+ unsigned long ip_event;
+
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+ netxen_config_indev_addr(adapter, netdev, event);
+
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
+ netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
+ }
+}
+
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+ struct netxen_adapter *adapter;
+
+ if (!is_netxen_netdev(dev))
+ return false;
+ adapter = netdev_priv(dev);
+ if (!adapter)
+ return false;
+ if (!netxen_destip_supported(adapter))
+ return false;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return false;
+
+ return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+ struct net_device *master, *slave;
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+	/*
+	 * This is the case where the netxen NIC is being enslaved
+	 * and is dev_open()ed in bond_enslave(). Now we should
+	 * program the bond's (and its VLANs') addresses into the
+	 * netxen NIC.
+	 */
+ if (master && netif_is_bond_master(master) &&
+ !netif_is_bond_slave(dev)) {
+ netxen_config_indev_addr(adapter, master, event);
+ for_each_netdev_rcu(&init_net, slave)
+ if (slave->priv_flags & IFF_802_1Q_VLAN &&
+ vlan_dev_real_dev(slave) == master)
+ netxen_config_indev_addr(adapter, slave, event);
+ }
+ rcu_read_unlock();
+ /*
+ * This is the case where the netxen nic is being
+ * released and is dev_close()ed in bond_release()
+ * just before IFF_BONDING is stripped.
+ */
+ if (!master && dev->priv_flags & IFF_BONDING)
+ netxen_free_ip_list(adapter, true);
+}
+
+static int netxen_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *orig_dev = dev;
+ struct net_device *slave;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
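+	/* Peel off VLAN devices until we reach the real device. */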
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+		/* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_config_indev_addr(adapter,
+ orig_dev, event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ /* Act only if the actual netxen is the target */
+ if (orig_dev == dev)
+ netxen_config_master(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
+ }
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+netxen_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *dev, *slave;
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ unsigned long ip_event;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+		/* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block netxen_netdev_cb = {
+ .notifier_call = netxen_netdev_event,
+};
+
+static struct notifier_block netxen_inetaddr_cb = {
+ .notifier_call = netxen_inetaddr_event,
+};
+#else
+static void
+netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+static void
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
+{ }
+#endif
+
+static const struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+ .resume = netxen_io_resume,
+};
+
+static struct pci_driver netxen_driver = {
+ .name = netxen_nic_driver_name,
+ .id_table = netxen_pci_tbl,
+ .probe = netxen_nic_probe,
+ .remove = netxen_nic_remove,
+#ifdef CONFIG_PM
+ .suspend = netxen_nic_suspend,
+ .resume = netxen_nic_resume,
+#endif
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
+};
+
+static int __init netxen_init_module(void)
+{
+ printk(KERN_INFO "%s\n", netxen_nic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&netxen_netdev_cb);
+ register_inetaddr_notifier(&netxen_inetaddr_cb);
+#endif
+ return pci_register_driver(&netxen_driver);
+}
+
+module_init(netxen_init_module);
+
+static void __exit netxen_exit_module(void)
+{
+ pci_unregister_driver(&netxen_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&netxen_inetaddr_cb);
+ unregister_netdevice_notifier(&netxen_netdev_cb);
+#endif
+}
+
+module_exit(netxen_exit_module);
diff --git a/kernel/drivers/net/ethernet/qlogic/qla3xxx.c b/kernel/drivers/net/ethernet/qlogic/qla3xxx.c
new file mode 100644
index 000000000..484771321
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -0,0 +1,3949 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/prefetch.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME "qla3xxx"
+#define DRV_STRING "QLogic ISP3XXX Network Driver"
+#define DRV_VERSION "v2.03.00-k5"
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+#define TIMED_OUT_MSG \
+"Timed out waiting for management port to get free before issuing command\n"
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+ = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static const struct pci_device_id ql3xxx_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * These are the known PHYs that are used
+ */
+enum PHY_DEVICE_TYPE {
+ PHY_TYPE_UNKNOWN = 0,
+ PHY_VITESSE_VSC8211,
+ PHY_AGERE_ET1011C,
+ MAX_PHY_DEV_TYPES
+};
+
+struct PHY_DEVICE_INFO {
+ const enum PHY_DEVICE_TYPE phyDevice;
+ const u32 phyIdOUI;
+ const u16 phyIdModel;
+ const char *name;
+};
+
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+ {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+ {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+ {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+};
+
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+ u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+ unsigned int seconds = 3;
+
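+	/* Request the semaphore by writing mask|bits, then read back
+	 * to see whether we own it; retry for up to 3 seconds.
+	 */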
+ do {
+ writel((sem_mask | sem_bits),
+ &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ if ((value & (sem_mask >> 16)) == sem_bits)
+ return 0;
+ ssleep(1);
+ } while (--seconds);
+ return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+ readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+ int i = 0;
+
+ do {
+ if (ql_sem_lock(qdev,
+ QL_DRVR_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 1)) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "driver lock acquired\n");
+ return 1;
+ }
+ ssleep(1);
+ } while (++i < 10);
+
+ netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+ return 0;
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ writel(((ISP_CONTROL_NP_MASK << 16) | page),
+ &port_regs->CommonRegs.ispControlStatus);
+ readl(&port_regs->CommonRegs.ispControlStatus);
+ qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ value = readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ value = readl(reg);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ writel(value, reg);
+ readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ writel(value, reg);
+ readl(reg);
+}
+
+static void ql_write_nvram_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ writel(value, reg);
+ readl(reg);
+ udelay(1);
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ writel(value, reg);
+ readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 1)
+ ql_set_register_page(qdev, 1);
+ writel(value, reg);
+ readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 2)
+ ql_set_register_page(qdev, 2);
+ writel(value, reg);
+ readl(reg);
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+ struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+ dma_addr_t map;
+ int err;
+ lrg_buf_cb->next = NULL;
+
+ if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
+ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+ } else {
+ qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+ qdev->lrg_buf_free_tail = lrg_buf_cb;
+ }
+
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ qdev->lrg_buf_skb_check++;
+ } else {
+			/*
+			 * We reserve some space to copy the ethhdr from
+			 * the first buffer.
+			 */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+
+ qdev->lrg_buf_skb_check++;
+ return;
+ }
+
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ }
+ }
+
+ qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
+ *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+
+ if (lrg_buf_cb != NULL) {
+ qdev->lrg_buf_free_head = lrg_buf_cb->next;
+ if (qdev->lrg_buf_free_head == NULL)
+ qdev->lrg_buf_free_tail = NULL;
+ qdev->lrg_buf_free_count--;
+ }
+
+ return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+ unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+ ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+ ql_write_nvram_reg(qdev, spir,
+ ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+ int i;
+ u32 mask;
+ u32 dataBit;
+ u32 previousBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ /* Clock in a zero, then do the start bit */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
+
+ mask = 1 << (FM93C56A_CMD_BITS - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+ dataBit = (cmd & mask)
+ ? AUBURN_EEPROM_DO_1
+ : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /* If the bit changed, change the DO state to match */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK |
+ qdev->eeprom_cmd_data | dataBit));
+ previousBit = dataBit;
+ }
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_FALL));
+ cmd = cmd << 1;
+ }
+
+ mask = 1 << (addrBits - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < addrBits; i++) {
+ dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+ : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /*
+ * If the bit changed, then change the DO state to
+ * match
+ */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK |
+ qdev->eeprom_cmd_data | dataBit));
+ previousBit = dataBit;
+ }
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_FALL));
+ eepromAddr = eepromAddr << 1;
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_deselect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
+ ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
+{
+ int i;
+ u32 data = 0;
+ u32 dataBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ /* Read the data bits */
+ /* The first bit is a dummy. Clock right over it. */
+ for (i = 0; i < dataBits; i++) {
+ ql_write_nvram_reg(qdev, spir,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_RISE);
+ ql_write_nvram_reg(qdev, spir,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_FALL);
+ dataBit = (ql_read_common_reg(qdev, spir) &
+ AUBURN_EEPROM_DI_1) ? 1 : 0;
+ data = (data << 1) | dataBit;
+ }
+ *value = (u16)data;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void eeprom_readword(struct ql3_adapter *qdev,
+ u32 eepromAddr, unsigned short *value)
+{
+ fm93c56a_select(qdev);
+ fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
+ fm93c56a_datain(qdev, value);
+ fm93c56a_deselect(qdev);
+}
+
+static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
+{
+ __le16 *p = (__le16 *)ndev->dev_addr;
+ p[0] = cpu_to_le16(addr[0]);
+ p[1] = cpu_to_le16(addr[1]);
+ p[2] = cpu_to_le16(addr[2]);
+}
+
+static int ql_get_nvram_params(struct ql3_adapter *qdev)
+{
+ u16 *pEEPROMData;
+ u16 checksum = 0;
+ u32 index;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ pEEPROMData = (u16 *)&qdev->nvram_data;
+ qdev->eeprom_cmd_data = 0;
+ if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 10)) {
+ pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
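+	/* Read the whole EEPROM; the 16-bit words of a valid image
+	 * sum to zero.
+	 */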
+ for (index = 0; index < EEPROM_SIZE; index++) {
+ eeprom_readword(qdev, index, pEEPROMData);
+ checksum += *pEEPROMData;
+ pEEPROMData++;
+ }
+ ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
+
+ if (checksum != 0) {
+ netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+ checksum);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return checksum;
+}
+
+static const u32 PHYAddr[2] = {
+ PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
+};
+
+static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 temp;
+ int count = 1000;
+
+ while (count) {
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
+ if (!(temp & MAC_MII_STATUS_BSY))
+ return 0;
+ udelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 scanControl;
+
+ if (qdev->numPorts > 1) {
+ /* Auto scan will cycle through multiple ports */
+ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
+ } else {
+ scanControl = MAC_MII_CONTROL_SC;
+ }
+
+	/*
+	 * Scan register 1 of both the PHY and the PETBI, and set up
+	 * to scan both devices. The autoscan starts from the first
+	 * register and completes the last one before rolling over to
+	 * the first.
+	 */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (scanControl) |
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
+}
+
+static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
+{
+ u8 ret;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ /* See if scan mode is enabled before we turn it off */
+ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
+ (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
+ /* Scan is enabled */
+ ret = 1;
+ } else {
+ /* Scan is disabled */
+ ret = 0;
+ }
+
+	/*
+	 * When disabling scan mode, you must first change the MII
+	 * register address.
+	 */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
+ MAC_MII_CONTROL_RC) << 16));
+
+ return ret;
+}
+
+static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
+ u16 regAddr, u16 value, u32 phyAddr)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ phyAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete 9/10/04 SJP */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
+ u16 *value, u32 phyAddr)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+ u32 temp;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ phyAddr | regAddr);
+
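+	/* The upper 16 bits of these registers act as a write mask:
+	 * first clear the read-cycle bit, then set it to start a read.
+	 */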
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete. */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+ u32 temp;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+ ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset(qdev);
+ ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset_ex(qdev);
+ ql_petbi_start_neg_ex(qdev);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
+{
+ netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
+ /* power down device bit 11 = 1 */
+ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
+ /* enable diagnostic mode bit 2 = 1 */
+ ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
+ /* 1000MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
+ /* 1000MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
+ /* 100MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
+ /* 100MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
+ /* 10MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
+ /* 10MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
+ /* point to hidden reg 0x2806 */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
+ /* Write new PHYAD w/bit 5 set */
+ ql_mii_write_reg_ex(qdev, 0x11,
+ 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+ /*
+ * Disable diagnostic mode bit 2 = 0
+ * Power up device bit 11 = 0
+ * Link up (on) and activity (blink)
+ */
+ ql_mii_write_reg(qdev, 0x12, 0x840a);
+ ql_mii_write_reg(qdev, 0x00, 0x1140);
+ ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
+}
+
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+ u16 phyIdReg0, u16 phyIdReg1)
+{
+ enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
+ u32 oui;
+ u16 model;
+ int i;
+
+ if (phyIdReg0 == 0xffff)
+ return result;
+
+ if (phyIdReg1 == 0xffff)
+ return result;
+
+ /* oui is split between two registers */
+ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
+
+ model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
+
+ /* Scan table for this PHY */
+ for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+ if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+ (model == PHY_DEVICES[i].phyIdModel)) {
+ netdev_info(qdev->ndev, "Phy: %s\n",
+ PHY_DEVICES[i].name);
+ result = PHY_DEVICES[i].phyDevice;
+ break;
+ }
+ }
+
+ return result;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ switch (qdev->phyType) {
+ case PHY_AGERE_ET1011C: {
+ if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
+ return 0;
+
+ reg = (reg >> 8) & 3;
+ break;
+ }
+ default:
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+
+ reg = (((reg & 0x18) >> 3) & 3);
+ }
+
+ switch (reg) {
+ case 2:
+ return SPEED_1000;
+ case 1:
+ return SPEED_100;
+ case 0:
+ return SPEED_10;
+ default:
+ return -1;
+ }
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ switch (qdev->phyType) {
+ case PHY_AGERE_ET1011C: {
+ if (ql_mii_read_reg(qdev, 0x1A, &reg))
+ return 0;
+
+ return ((reg & 0x0080) && (reg & 0x1000)) != 0;
+ }
+ case PHY_VITESSE_VSC8211:
+ default: {
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+ return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+ }
+ }
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PHY_NEG_PAUSE) != 0;
+}
+
+static int PHY_Setup(struct ql3_adapter *qdev)
+{
+ u16 reg1;
+ u16 reg2;
+ bool agereAddrChangeNeeded = false;
+ u32 miiAddr = 0;
+ int err;
+
+	/* Determine the PHY we are using by reading the IDs */
+ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+ return err;
+ }
+
+ err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+ return err;
+ }
+
+	/* Check if we have an Agere PHY */
+ if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
+
+		/* Determine which MII address to use, based on
+		   the index of the card */
+ if (qdev->mac_index == 0)
+ miiAddr = MII_AGERE_ADDR_1;
+ else
+ miiAddr = MII_AGERE_ADDR_2;
+
+ err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+ if (err != 0) {
+ netdev_err(qdev->ndev,
+ "Could not read from reg PHY_ID_0_REG after Agere detected\n");
+ return err;
+ }
+
+ err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+ return err;
+ }
+
+ /* We need to remember to initialize the Agere PHY */
+ agereAddrChangeNeeded = true;
+ }
+
+	/* Determine the particular PHY we have on board to apply
+	   PHY-specific initializations */
+ qdev->phyType = getPhyType(qdev, reg1, reg2);
+
+ if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
+ /* need this here so address gets changed */
+ phyAgereSpecificInit(qdev, miiAddr);
+ } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
+ netdev_err(qdev->ndev, "PHY is unknown\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+ else
+ value = (MAC_CONFIG_REG_PE << 16);
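+	/*
+	 * These MAC config registers appear to use the driver-wide
+	 * set/clear-mask convention: the upper 16 bits act as a
+	 * write-enable mask for the corresponding lower bits, so
+	 * (bit | (bit << 16)) sets the bit while (bit << 16) alone
+	 * clears it without disturbing the other bits.
+	 */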
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+ else
+ value = (MAC_CONFIG_REG_SR << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+ else
+ value = (MAC_CONFIG_REG_GM << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+ else
+ value = (MAC_CONFIG_REG_FD << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value =
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+ else
+ value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_SM0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_SM1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+ ql_mii_read_reg(qdev, 0x00, &reg);
+ return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AC0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AC1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
+ return 1;
+ }
+ netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+ return 0;
+}
+
+/*
+ * ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return ql_is_petbi_neg_pause(qdev);
+ else
+ return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AE0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AE1;
+ break;
+ }
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return SPEED_1000;
+ else
+ return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return 1;
+ else
+ return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = ISP_CONTROL_LINK_DN_0;
+ break;
+ case 1:
+ bitToCheck = ISP_CONTROL_LINK_DN_1;
+ break;
+ }
+
+ temp =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ switch (qdev->mac_index) {
+ case 0:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_0) |
+ (ISP_CONTROL_LINK_DN_0 << 16));
+ break;
+
+ case 1:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_1) |
+ (ISP_CONTROL_LINK_DN_1 << 16));
+ break;
+
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_F1_ENABLED;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_F3_ENABLED;
+ break;
+ default:
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "not link master\n");
+ return 0;
+ }
+
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+ return 1;
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
+{
+ u16 reg;
+ u16 portConfiguration;
+
+	if (qdev->phyType == PHY_AGERE_ET1011C)
+		ql_mii_write_reg(qdev, 0x13, 0x0000); /* turn off external loopback */
+
+ if (qdev->mac_index == 0)
+ portConfiguration =
+ qdev->nvram_data.macCfg_port0.portConfiguration;
+ else
+ portConfiguration =
+ qdev->nvram_data.macCfg_port1.portConfiguration;
+
+	/* Some HBAs in the field are set to 0 and need to
+	   be reinterpreted with a default value */
+ if (portConfiguration == 0)
+ portConfiguration = PORT_CONFIG_DEFAULT;
+
+ /* Set the 1000 advertisements */
+ ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg &= ~PHY_GIG_ALL_PARAMS;
+
+ if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+ if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+ reg |= PHY_GIG_ADV_1000F;
+ else
+ reg |= PHY_GIG_ADV_1000H;
+ }
+
+ ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
+ PHYAddr[qdev->mac_index]);
+
+ /* Set the 10/100 & pause negotiation advertisements */
+ ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg &= ~PHY_NEG_ALL_PARAMS;
+
+ if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+ reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
+
+ if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+ if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+ reg |= PHY_NEG_ADV_100F;
+
+ if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+ reg |= PHY_NEG_ADV_10F;
+ }
+
+ if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+ if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+ reg |= PHY_NEG_ADV_100H;
+
+ if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+ reg |= PHY_NEG_ADV_10H;
+ }
+
+ if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
+ reg |= 1;
+
+ ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, CONTROL_REG,
+ reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev)
+{
+ ql_phy_reset_ex(qdev);
+ PHY_Setup(qdev);
+ ql_phy_start_neg_ex(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp, linkState;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_UP0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_UP1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck)
+ linkState = LS_UP;
+ else
+ linkState = LS_DOWN;
+
+ return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
+ return -1;
+ }
+
+ if (ql_is_fiber(qdev)) {
+ ql_petbi_init(qdev);
+ } else {
+ /* Copper port */
+ ql_phy_init_ex(qdev);
+ }
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (!ql_auto_neg_error(qdev)) {
+ if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+ /* configure the MAC */
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Configuring link\n");
+ ql_mac_cfg_soft_reset(qdev, 1);
+ ql_mac_cfg_gig(qdev,
+ (ql_get_link_speed
+ (qdev) ==
+ SPEED_1000));
+ ql_mac_cfg_full_dup(qdev,
+ ql_is_link_full_dup
+ (qdev));
+ ql_mac_cfg_pause(qdev,
+ ql_is_neg_pause
+ (qdev));
+ ql_mac_cfg_soft_reset(qdev, 0);
+
+ /* enable the MAC */
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Enabling mac\n");
+ ql_mac_enable(qdev, 1);
+ }
+
+ qdev->port_link_state = LS_UP;
+ netif_start_queue(qdev->ndev);
+ netif_carrier_on(qdev->ndev);
+ netif_info(qdev, link, qdev->ndev,
+ "Link is up at %d Mbps, %s duplex\n",
+ ql_get_link_speed(qdev),
+ ql_is_link_full_dup(qdev) ? "full" : "half");
+
+ } else { /* Remote error detected */
+
+ if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Remote error detected. Calling ql_port_start()\n");
+ /*
+ * ql_port_start() is shared code and needs
+			 * to lock the PHY on its own.
+ */
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ if (ql_port_start(qdev)) /* Restart port */
+ return -1;
+ return 0;
+ }
+ }
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static void ql_link_state_machine_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, link_state_work.work);
+
+ u32 curr_link_state;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ curr_link_state = ql_get_link_state(qdev);
+
+ if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+ netif_info(qdev, link, qdev->ndev,
+ "Reset in progress, skip processing link state\n");
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		/* Restart the timer on a 1 second (HZ) interval. */
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ return;
+ }
+
+ switch (qdev->port_link_state) {
+ default:
+ if (test_bit(QL_LINK_MASTER, &qdev->flags))
+ ql_port_start(qdev);
+ qdev->port_link_state = LS_DOWN;
+ /* Fall Through */
+
+ case LS_DOWN:
+ if (curr_link_state == LS_UP) {
+ netif_info(qdev, link, qdev->ndev, "Link is up\n");
+ if (ql_is_auto_neg_complete(qdev))
+ ql_finish_auto_neg(qdev);
+
+ if (qdev->port_link_state == LS_UP)
+ ql_link_down_detect_clear(qdev);
+
+ qdev->port_link_state = LS_UP;
+ }
+ break;
+
+ case LS_UP:
+ /*
+ * See if the link is currently down or went down and came
+ * back up
+ */
+ if (curr_link_state == LS_DOWN) {
+ netif_info(qdev, link, qdev->ndev, "Link is down\n");
+ qdev->port_link_state = LS_DOWN;
+ }
+ if (ql_link_down_detect(qdev))
+ qdev->port_link_state = LS_DOWN;
+ break;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+	/* Restart the timer on a 1 second (HZ) interval. */
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+ if (ql_this_adapter_controls_port(qdev))
+ set_bit(QL_LINK_MASTER, &qdev->flags);
+ else
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+ ql_mii_enable_scan_mode(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+ if (ql_this_adapter_controls_port(qdev))
+ ql_petbi_init_ex(qdev);
+ } else {
+ if (ql_this_adapter_controls_port(qdev))
+ ql_phy_init_ex(qdev);
+ }
+}
+
+/*
+ * ql_mii_setup() needs to be called before taking the PHY out of reset
+ * so that the management interface clock speed can be set properly.
+ * It would be better if we had a way to disable MDC until after the
+ * PHY is out of reset, but we don't have that capability.
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+ u32 reg;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ ql_write_page0_reg(qdev,
+ &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
+ /* Divide 125MHz clock by 28 to meet PHY timing requirements */
+ reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \
+ SUPPORTED_FIBRE | \
+ SUPPORTED_Autoneg)
+#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full | \
+ SUPPORTED_Autoneg | \
+				SUPPORTED_TP)
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
+ return SUPPORTED_OPTICAL_MODES;
+
+ return SUPPORTED_TP_MODES;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_auto_cfg(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+ u32 status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_get_link_speed(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_link_full_dup(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = ql_supported_modes(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+ ecmd->port = PORT_FIBRE;
+ } else {
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = qdev->PHYAddr;
+ }
+ ecmd->advertising = ql_supported_modes(qdev);
+ ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+ ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
+ ecmd->duplex = ql_get_full_dup(qdev);
+ return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ql3xxx_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ qdev->msg_enable = value;
+}
+
+static void ql_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ u32 reg;
+ if (qdev->mac_index == 0)
+ reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
+ else
+ reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
+
+ pause->autoneg = ql_get_auto_cfg_status(qdev);
+ pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
+ pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
+}
+
+static const struct ethtool_ops ql3xxx_ethtool_ops = {
+ .get_settings = ql_get_settings,
+ .get_drvinfo = ql_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ql_get_msglevel,
+ .set_msglevel = ql_set_msglevel,
+ .get_pauseparam = ql_get_pauseparam,
+};
+
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+ dma_addr_t map;
+ int err;
+
+ while (lrg_buf_cb) {
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb =
+ netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Failed netdev_alloc_skb()\n");
+ break;
+ } else {
+ /*
+ * We save some space to copy the ethhdr from
+ * first buffer
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+ break;
+ }
+
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ --qdev->lrg_buf_skb_check;
+ if (!qdev->lrg_buf_skb_check)
+ return 1;
+ }
+ }
+ lrg_buf_cb = lrg_buf_cb->next;
+ }
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if (qdev->small_buf_release_cnt >= 16) {
+ while (qdev->small_buf_release_cnt >= 16) {
+ qdev->small_buf_q_producer_index++;
+
+ if (qdev->small_buf_q_producer_index ==
+ NUM_SBUFQ_ENTRIES)
+ qdev->small_buf_q_producer_index = 0;
+ qdev->small_buf_release_cnt -= 8;
+ }
+ wmb();
+ writel(qdev->small_buf_q_producer_index,
+ &port_regs->CommonRegs.rxSmallQProducerIndex);
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct bufq_addr_element *lrg_buf_q_ele;
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if ((qdev->lrg_buf_free_count >= 8) &&
+ (qdev->lrg_buf_release_cnt >= 16)) {
+
+ if (qdev->lrg_buf_skb_check)
+ if (!ql_populate_free_queue(qdev))
+ return;
+
+ lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+ while ((qdev->lrg_buf_release_cnt >= 16) &&
+ (qdev->lrg_buf_free_count >= 8)) {
+
+ for (i = 0; i < 8; i++) {
+ lrg_buf_cb =
+ ql_get_from_lrg_buf_free_list(qdev);
+ lrg_buf_q_ele->addr_high =
+ lrg_buf_cb->buf_phy_addr_high;
+ lrg_buf_q_ele->addr_low =
+ lrg_buf_cb->buf_phy_addr_low;
+ lrg_buf_q_ele++;
+
+ qdev->lrg_buf_release_cnt--;
+ }
+
+ qdev->lrg_buf_q_producer_index++;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ qdev->num_lbufq_entries)
+ qdev->lrg_buf_q_producer_index = 0;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ (qdev->num_lbufq_entries - 1)) {
+ lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+ }
+ }
+ wmb();
+ qdev->lrg_buf_next_free = lrg_buf_q_ele;
+ writel(qdev->lrg_buf_q_producer_index,
+ &port_regs->CommonRegs.rxLargeQProducerIndex);
+ }
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+
+ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ netdev_warn(qdev->ndev,
+ "Frame too short but it was padded and sent\n");
+ }
+
+ tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+ /* Check the transmit response flags for any errors */
+ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ netdev_err(qdev->ndev,
+ "Frame too short to be legal, frame not sent\n");
+
+ qdev->ndev->stats.tx_errors++;
+ goto frame_not_sent;
+ }
+
+ if (tx_cb->seg_count == 0) {
+ netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+ mac_rsp->transaction_id);
+
+ qdev->ndev->stats.tx_errors++;
+ goto invalid_seg_count;
+ }
+
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+ tx_cb->seg_count--;
+ if (tx_cb->seg_count) {
+ for (i = 1; i < tx_cb->seg_count; i++) {
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[i],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[i], maplen),
+ PCI_DMA_TODEVICE);
+ }
+ }
+ qdev->ndev->stats.tx_packets++;
+ qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
+ dev_kfree_skb_irq(tx_cb->skb);
+ tx_cb->skb = NULL;
+
+invalid_seg_count:
+ atomic_inc(&qdev->tx_count);
+}
+
+static void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+ if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+ qdev->small_buf_index = 0;
+ qdev->small_buf_release_cnt++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+ lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+ qdev->lrg_buf_index = 0;
+ return lrg_buf_cb;
+}
+
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion. The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data. For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two. Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process. 3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
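+/*
+ * Note that the QL_HEADER_SPACE reserved when the receive skbs are
+ * allocated (see ql_alloc_large_buffers()) is what makes the 3022
+ * header copy possible: ql_process_macip_rx_intr() skb_push()es the
+ * header from buffer one into the reserved headroom of buffer two.
+ */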
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ struct sk_buff *skb;
+ u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+ ql_get_sbuf(qdev);
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
+
+ /* start of second buffer */
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
+ skb = lrg_buf_cb2->skb;
+
+ qdev->ndev->stats.rx_packets++;
+ qdev->ndev->stats.rx_bytes += length;
+
+ skb_put(skb, length);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
+ PCI_DMA_FROMDEVICE);
+ prefetch(skb->data);
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+ netif_receive_skb(skb);
+ lrg_buf_cb2->skb = NULL;
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+ struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ struct sk_buff *skb1 = NULL, *skb2;
+ struct net_device *ndev = qdev->ndev;
+ u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+ u16 size = 0;
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+
+ ql_get_sbuf(qdev);
+
+ if (qdev->device_id == QL3022_DEVICE_ID) {
+ /* start of first buffer on 3022 */
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
+ skb1 = lrg_buf_cb1->skb;
+ size = ETH_HLEN;
+ if (*((u16 *) skb1->data) != 0xFFFF)
+ size += VLAN_ETH_HLEN - ETH_HLEN;
+ }
+
+ /* start of second buffer */
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
+ skb2 = lrg_buf_cb2->skb;
+
+ skb_put(skb2, length); /* Just the second buffer length here. */
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
+ PCI_DMA_FROMDEVICE);
+ prefetch(skb2->data);
+
+ skb_checksum_none_assert(skb2);
+ if (qdev->device_id == QL3022_DEVICE_ID) {
+ /*
+ * Copy the ethhdr from first buffer to second. This
+ * is necessary for 3022 IP completions.
+ */
+ skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
+ skb_push(skb2, size), size);
+ } else {
+ u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+ if (checksum &
+ (IB_IP_IOCB_RSP_3032_ICE |
+ IB_IP_IOCB_RSP_3032_CE)) {
+ netdev_err(ndev,
+ "%s: Bad checksum for this %s packet, checksum = %x\n",
+ __func__,
+ ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+ "TCP" : "UDP"), checksum);
+ } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
+ (checksum & IB_IP_IOCB_RSP_3032_UDP &&
+ !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
+ skb2->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ skb2->protocol = eth_type_trans(skb2, qdev->ndev);
+
+ netif_receive_skb(skb2);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += length;
+ lrg_buf_cb2->skb = NULL;
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static int ql_tx_rx_clean(struct ql3_adapter *qdev,
+ int *tx_cleaned, int *rx_cleaned, int work_to_do)
+{
+ struct net_rsp_iocb *net_rsp;
+ struct net_device *ndev = qdev->ndev;
+ int work_done = 0;
+
+ /* While there are entries in the completion queue. */
+ while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
+ qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+
+ net_rsp = qdev->rsp_current;
+ rmb();
+ /*
+		 * Fix the 3032 chip's undocumented "feature" where bit-8 is set
+ * if the inbound completion is for a VLAN.
+ */
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ net_rsp->opcode &= 0x7f;
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_IOCB_FN0:
+ case OPCODE_OB_MAC_IOCB_FN2:
+ ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
+ net_rsp);
+ (*tx_cleaned)++;
+ break;
+
+ case OPCODE_IB_MAC_IOCB:
+ case OPCODE_IB_3032_MAC_IOCB:
+ ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
+ net_rsp);
+ (*rx_cleaned)++;
+ break;
+
+ case OPCODE_IB_IP_IOCB:
+ case OPCODE_IB_3032_IP_IOCB:
+ ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
+ net_rsp);
+ (*rx_cleaned)++;
+ break;
+ default: {
+ u32 *tmp = (u32 *)net_rsp;
+ netdev_err(ndev,
+ "Hit default case, not handled!\n"
+ " dropping the packet, opcode = %x\n"
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+ net_rsp->opcode,
+ (unsigned long int)tmp[0],
+ (unsigned long int)tmp[1],
+ (unsigned long int)tmp[2],
+ (unsigned long int)tmp[3]);
+ }
+ }
+
+ qdev->rsp_consumer_index++;
+
+ if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+ } else {
+ qdev->rsp_current++;
+ }
+
+ work_done = *tx_cleaned + *rx_cleaned;
+ }
+
+ return work_done;
+}
+
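+/*
+ * Standard NAPI contract: when fewer than @budget completions were
+ * processed the poll is done, so NAPI is completed, the buffer queue
+ * producer indices are pushed to the chip and interrupts are
+ * re-enabled. Returning a value equal to @budget keeps polling.
+ */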
+static int ql_poll(struct napi_struct *napi, int budget)
+{
+ struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
+ int rx_cleaned = 0, tx_cleaned = 0;
+ unsigned long hw_flags;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+
+ if (tx_cleaned + rx_cleaned != budget) {
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ __napi_complete(napi);
+ ql_update_small_bufq_prod_index(qdev);
+ ql_update_lrg_bufq_prod_index(qdev);
+ writel(qdev->rsp_consumer_index,
+ &port_regs->CommonRegs.rspQConsumerIndex);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ ql_enable_interrupts(qdev);
+ }
+ return tx_cleaned + rx_cleaned;
+}
+
+static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+ int handled = 1;
+ u32 var;
+
+ value = ql_read_common_reg_l(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+
+ if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
+ spin_lock(&qdev->adapter_lock);
+ netif_stop_queue(qdev->ndev);
+ netif_carrier_off(qdev->ndev);
+ ql_disable_interrupts(qdev);
+ qdev->port_link_state = LS_DOWN;
+		set_bit(QL_RESET_ACTIVE, &qdev->flags);
+
+ if (value & ISP_CONTROL_FE) {
+ /*
+ * Chip Fatal Error.
+ */
+ var =
+ ql_read_page0_reg_l(qdev,
+ &port_regs->PortFatalErrStatus);
+ netdev_warn(ndev,
+ "Resetting chip. PortFatalErrStatus register = 0x%x\n",
+ var);
+			set_bit(QL_RESET_START, &qdev->flags);
+ } else {
+ /*
+ * Soft Reset Requested.
+ */
+			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ netdev_err(ndev,
+ "Another function issued a reset to the chip. ISR value = %x\n",
+ value);
+ }
+ queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
+ spin_unlock(&qdev->adapter_lock);
+ } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+ ql_disable_interrupts(qdev);
+ if (likely(napi_schedule_prep(&qdev->napi)))
+ __napi_schedule(&qdev->napi);
+ } else
+ return IRQ_NONE;
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Get the total number of segments needed for the given number of fragments.
+ * This is necessary because outbound address lists (OAL) will be used when
+ * more than two frags are given. Each address list has 5 addr/len pairs.
+ * The 5th pair in each OAL is used to point to the next OAL if more frags
+ * are coming. That is why the frags:segment count ratio is not linear.
+ */
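+/*
+ * Worked example: an skb with 5 fragments needs 5 + 2 = 7 segments:
+ * one for the linear head, five for the fragments, and one
+ * continuation entry chaining the IOCB to the first OAL.
+ */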
+static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
+{
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ return 1;
+
+ if (frags <= 2)
+ return frags + 1;
+ else if (frags <= 6)
+ return frags + 2;
+ else if (frags <= 10)
+ return frags + 3;
+ else if (frags <= 14)
+ return frags + 4;
+ else if (frags <= 18)
+ return frags + 5;
+ return -1;
+}
+
+static void ql_hw_csum_setup(const struct sk_buff *skb,
+ struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+ const struct iphdr *ip = ip_hdr(skb);
+
+ mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
+ mac_iocb_ptr->ip_hdr_len = ip->ihl;
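+	/* ip->ihl is the IP header length in 32-bit words */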
+
+ if (ip->protocol == IPPROTO_TCP) {
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+ OB_3032MAC_IOCB_REQ_IC;
+ } else {
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+ OB_3032MAC_IOCB_REQ_IC;
+ }
+}
+
+/*
+ * Map the buffers for this transmit.
+ * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_send_map(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_req *mac_iocb_ptr,
+ struct ql_tx_buf_cb *tx_cb,
+ struct sk_buff *skb)
+{
+ struct oal *oal;
+ struct oal_entry *oal_entry;
+ int len = skb_headlen(skb);
+ dma_addr_t map;
+ int err;
+ int completed_segs, i;
+ int seg_cnt, seg = 0;
+ int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
+
+ seg_cnt = tx_cb->seg_count;
+ /*
+ * Map the skb buffer first.
+ */
+ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+ err);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ seg++;
+
+ if (seg_cnt == 1) {
+ /* Terminate the last segment. */
+ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+ return NETDEV_TX_OK;
+ }
+ oal = tx_cb->oal;
+ for (completed_segs = 0;
+ completed_segs < frag_cnt;
+ completed_segs++, seg++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+ oal_entry++;
+ /*
+ * Check for continuation requirements.
+ * It's strange but necessary.
+ * Continuation entry points to outbound address list.
+ */
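+		/*
+		 * The thresholds follow from the chain layout described
+		 * above ql3xxx_send(): the IOCB holds 3 addr/len pairs
+		 * (the 3rd becomes the OAL pointer) and each OAL holds 5
+		 * (the 5th becomes the next pointer), so a chain entry is
+		 * needed after data segments 2, 7, 12 and 17.
+		 */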
+ if ((seg == 2 && seg_cnt > 3) ||
+ (seg == 7 && seg_cnt > 8) ||
+ (seg == 12 && seg_cnt > 13) ||
+ (seg == 17 && seg_cnt > 18)) {
+ map = pci_map_single(qdev->pdev, oal,
+ sizeof(struct oal),
+ PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+ OAL_CONT_ENTRY);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
+ sizeof(struct oal));
+ oal_entry = (struct oal_entry *)oal;
+ oal++;
+ seg++;
+ }
+
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping frags failed with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(skb_frag_size(frag));
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
+ }
+ /* Terminate the last segment. */
+ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+ return NETDEV_TX_OK;
+
+map_error:
+	/* A PCI mapping failed, so we need to back out. Traverse
+	 * the OALs and associated pages that have already been mapped
+	 * and unmap them to clean up properly.
+	 */
+
+ seg = 1;
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal = tx_cb->oal;
+ for (i = 0; i < completed_segs; i++, seg++) {
+ oal_entry++;
+
+ /*
+ * Check for continuation requirements.
+ * It's strange but necessary.
+ */
+
+ if ((seg == 2 && seg_cnt > 3) ||
+ (seg == 7 && seg_cnt > 8) ||
+ (seg == 12 && seg_cnt > 13) ||
+ (seg == 17 && seg_cnt > 18)) {
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ PCI_DMA_TODEVICE);
+ oal++;
+ seg++;
+ }
+
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ PCI_DMA_TODEVICE);
+ }
+
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+
+ return NETDEV_TX_BUSY;
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
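+/*
+ * Rough shape of a 3032 chain (sketch):
+ *
+ *   IOCB: [ALP0][ALP1][ALP2 -> OAL]
+ *   OAL:  [ALP][ALP][ALP][ALP][ALP -> next OAL]
+ *
+ * The last ALP only becomes a pointer when more entries are needed;
+ * otherwise it carries buffer data like the rest.
+ */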
+static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct ql_tx_buf_cb *tx_cb;
+ u32 tot_len = skb->len;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+
+ if (unlikely(atomic_read(&qdev->tx_count) < 2))
+ return NETDEV_TX_BUSY;
+
+ tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+ tx_cb->seg_count = ql_get_seg_count(qdev,
+ skb_shinfo(skb)->nr_frags);
+ if (tx_cb->seg_count == -1) {
+ netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+ return NETDEV_TX_OK;
+ }
+
+ mac_iocb_ptr = tx_cb->queue_entry;
+ memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
+ mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+ mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+ mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+ mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+ mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+ tx_cb->skb = skb;
+ if (qdev->device_id == QL3032_DEVICE_ID &&
+ skb->ip_summed == CHECKSUM_PARTIAL)
+ ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+ if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+ netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ wmb();
+ qdev->req_producer_index++;
+ if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
+ qdev->req_producer_index = 0;
+ wmb();
+ ql_write_common_reg_l(qdev,
+ &port_regs->CommonRegs.reqQProducerIndex,
+ qdev->req_producer_index);
+
+ netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+ "tx queued, slot %d, len %d\n",
+ qdev->req_producer_index, skb->len);
+
+ atomic_dec(&qdev->tx_count);
+ return NETDEV_TX_OK;
+}
+
+static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ qdev->req_q_size =
+ (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+
+ qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+	/* The barrier is required to ensure that the request and response
+	 * queue address writes reach the registers in order.
+	 */
+ wmb();
+
+ qdev->req_q_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ (size_t) qdev->req_q_size,
+ &qdev->req_q_phy_addr);
+
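+	/*
+	 * The low-order-bit tests below reject allocations that are not
+	 * aligned to the queue size; this presumably relies on the queue
+	 * sizes working out to powers of two.
+	 */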
+ if ((qdev->req_q_virt_addr == NULL) ||
+ LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
+ netdev_err(qdev->ndev, "reqQ failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->rsp_q_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ (size_t) qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr);
+
+ if ((qdev->rsp_q_virt_addr == NULL) ||
+ LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
+ netdev_err(qdev->ndev, "rspQ allocation failed\n");
+ pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
+ qdev->req_q_virt_addr,
+ qdev->req_q_phy_addr);
+ return -ENOMEM;
+ }
+
+ set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+
+ return 0;
+}
+
+static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+
+ pci_free_consistent(qdev->pdev,
+ qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+
+ qdev->req_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+
+ qdev->rsp_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+ /* Create Large Buffer Queue */
+ qdev->lrg_buf_q_size =
+ qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
+ if (qdev->lrg_buf_q_size < PAGE_SIZE)
+ qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
+ qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+ sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL)
+ return -ENOMEM;
+
+ qdev->lrg_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr);
+
+ if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "lBufQ failed\n");
+ return -ENOMEM;
+ }
+ qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+ qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+ /* Create Small Buffer Queue */
+ qdev->small_buf_q_size =
+ NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ if (qdev->small_buf_q_size < PAGE_SIZE)
+ qdev->small_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+ qdev->small_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr);
+
+ if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
+ pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+ qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+ set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ kfree(qdev->lrg_buf);
+ pci_free_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+
+ qdev->lrg_buf_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
+
+ qdev->small_buf_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct bufq_addr_element *small_buf_q_entry;
+
+	/* Currently we allocate one chunk of memory and use it for small buffers */
+ qdev->small_buf_total_size =
+ (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr);
+
+ if (qdev->small_buf_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+ qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+ small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+ /* Initialize the small buffer queue. */
+ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
+ small_buf_q_entry->addr_high =
+ cpu_to_le32(qdev->small_buf_phy_addr_high);
+ small_buf_q_entry->addr_low =
+ cpu_to_le32(qdev->small_buf_phy_addr_low +
+ (i * QL_SMALL_BUFFER_SIZE));
+ small_buf_q_entry++;
+ }
+ qdev->small_buf_index = 0;
+ set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_small_buffers(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ if (qdev->small_buf_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
+
+ qdev->small_buf_virt_addr = NULL;
+ }
+}
+
+static void ql_free_large_buffers(struct ql3_adapter *qdev)
+{
+ int i = 0;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ if (lrg_buf_cb->skb) {
+ dev_kfree_skb(lrg_buf_cb->skb);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
+ PCI_DMA_FROMDEVICE);
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ } else {
+ break;
+ }
+ }
+}
+
+static void ql_init_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
+ buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
+ buf_addr_ele++;
+ }
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_skb_check = 0;
+}
+
+static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ int err;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ netdev_err(qdev->ndev,
+ "large buff alloc failed for %d bytes at index %d\n",
+ qdev->lrg_buffer_len * 2, i);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ } else {
+
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ lrg_buf_cb->index = i;
+ lrg_buf_cb->skb = skb;
+ /*
+ * We save some space to copy the ethhdr from first
+ * buffer
+ */
+ skb_reserve(skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
+
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ }
+ }
+ return 0;
+}
+
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+
+ tx_cb = &qdev->tx_buf[0];
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ kfree(tx_cb->oal);
+ tx_cb->oal = NULL;
+ tx_cb++;
+ }
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+ struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
+
+ /* Create free list of transmit buffers */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
+ tx_cb = &qdev->tx_buf[i];
+ tx_cb->skb = NULL;
+ tx_cb->queue_entry = req_q_curr;
+ req_q_curr++;
+ tx_cb->oal = kmalloc(512, GFP_KERNEL);
+ if (tx_cb->oal == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
+{
+ if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
+ qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
+ qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+ } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+ /*
+ * Bigger buffers, so less of them.
+ */
+ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
+ qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
+ } else {
+ netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
+ qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
+ return -ENOMEM;
+ }
+ qdev->num_large_buffers =
+ qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
+ qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
+ qdev->max_frame_size =
+ (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+
+ /*
+ * First allocate a page of shared memory and use it for shadow
+ * locations of Network Request Queue Consumer Address Register and
+ * Network Completion Queue Producer Index Register
+ */
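+	/*
+	 * Layout of the shadow page, as implied by the offsets used below:
+	 *   bytes 0-7:  request queue consumer index
+	 *   bytes 8-15: response queue producer index
+	 */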
+ qdev->shadow_reg_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
+ qdev->req_consumer_index_phy_addr_high =
+ MS_64BITS(qdev->shadow_reg_phy_addr);
+ qdev->req_consumer_index_phy_addr_low =
+ LS_64BITS(qdev->shadow_reg_phy_addr);
+
+ qdev->prsp_producer_index =
+ (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+ qdev->rsp_producer_index_phy_addr_high =
+ qdev->req_consumer_index_phy_addr_high;
+ qdev->rsp_producer_index_phy_addr_low =
+ qdev->req_consumer_index_phy_addr_low + 8;
+ } else {
+ netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
+ return -ENOMEM;
+ }
+
+ if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
+ goto err_req_rsp;
+ }
+
+ if (ql_alloc_buffer_queues(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
+ goto err_buffer_queues;
+ }
+
+ if (ql_alloc_small_buffers(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
+ goto err_small_buffers;
+ }
+
+ if (ql_alloc_large_buffers(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
+ goto err_small_buffers;
+ }
+
+ /* Initialize the large buffer queue. */
+ ql_init_large_buffers(qdev);
+ if (ql_create_send_free_list(qdev))
+ goto err_free_list;
+
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ return 0;
+err_free_list:
+ ql_free_send_free_list(qdev);
+err_small_buffers:
+ ql_free_buffer_queues(qdev);
+err_buffer_queues:
+ ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+
+ return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+ ql_free_send_free_list(qdev);
+ ql_free_large_buffers(qdev);
+ ql_free_small_buffers(qdev);
+ ql_free_buffer_queues(qdev);
+ ql_free_net_req_rsp_queues(qdev);
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+ qdev->shadow_reg_virt_addr = NULL;
+ }
+}
+
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_local_ram_registers __iomem *local_ram =
+ (void __iomem *)qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 4))
+ return -1;
+
+ ql_write_page2_reg(qdev,
+ &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->maxBufletCount,
+ qdev->nvram_data.bufletCount);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdLow,
+ (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+ (qdev->nvram_data.tcpWindowThreshold0));
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdHigh,
+ qdev->nvram_data.tcpWindowThreshold50);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableBase,
+ (qdev->nvram_data.ipHashTableBaseHi << 16) |
+ qdev->nvram_data.ipHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableCount,
+ qdev->nvram_data.ipHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableBase,
+ (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+ qdev->nvram_data.tcpHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableCount,
+ qdev->nvram_data.tcpHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->ncbBase,
+ (qdev->nvram_data.ncbTableBaseHi << 16) |
+ qdev->nvram_data.ncbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxNcbCount,
+ qdev->nvram_data.ncbTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->drbBase,
+ (qdev->nvram_data.drbTableBaseHi << 16) |
+ qdev->nvram_data.drbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxDrbCount,
+ qdev->nvram_data.drbTableSize);
+ ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+ return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+ u32 value;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+ (void __iomem *)port_regs;
+ u32 delay = 10;
+ int status = 0;
+
+ if (ql_mii_setup(qdev))
+ return -1;
+
+	/* Bring the PHY out of reset */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_WE |
+ (ISP_SERIAL_PORT_IF_WE << 16)));
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
+ qdev->port_link_state = LS_DOWN;
+ netif_carrier_off(qdev->ndev);
+
+ /* V2 chip fix for ARS-39168. */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_SDE |
+ (ISP_SERIAL_PORT_IF_SDE << 16)));
+
+ /* Request Queue Registers */
+ *((u32 *)(qdev->preq_consumer_index)) = 0;
+ atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
+ qdev->req_producer_index = 0;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrHigh,
+ qdev->req_consumer_index_phy_addr_high);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrLow,
+ qdev->req_consumer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrHigh,
+ MS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrLow,
+ LS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
+
+ /* Response Queue Registers */
+ *((__le16 *) (qdev->prsp_producer_index)) = 0;
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrHigh,
+ qdev->rsp_producer_index_phy_addr_high);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrLow,
+ qdev->rsp_producer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrHigh,
+ MS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrLow,
+ LS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
+
+ /* Large Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrHigh,
+ MS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrLow,
+ LS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQLength,
+ qdev->num_lbufq_entries);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeBufferLength,
+ qdev->lrg_buffer_len);
+
+ /* Small Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrHigh,
+ MS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrLow,
+ LS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallBufferLength,
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
+ qdev->small_buf_release_cnt = 8;
+ qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
+ qdev->lrg_buf_release_cnt = 8;
+ qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
+ qdev->small_buf_index = 0;
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_free_count = 0;
+ qdev->lrg_buf_free_head = NULL;
+ qdev->lrg_buf_free_tail = NULL;
+
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxSmallQProducerIndex,
+ qdev->small_buf_q_producer_index);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxLargeQProducerIndex,
+ qdev->lrg_buf_q_producer_index);
+
+ /*
+ * Find out if the chip has already been initialized. If it has, then
+ * we skip some of the initialization.
+ */
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if ((value & PORT_STATUS_IC) == 0) {
+
+ /* Chip has not been configured yet, so let it rip. */
+ if (ql_init_misc_registers(qdev)) {
+ status = -1;
+ goto out;
+ }
+
+ value = qdev->nvram_data.tcpMaxWindowSize;
+ ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
+
+ value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
+
+ if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 13)) {
+ status = -1;
+ goto out;
+ }
+ ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
+ ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
+ (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
+ 16) | (INTERNAL_CHIP_SD |
+ INTERNAL_CHIP_WE)));
+ ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
+ }
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev,
+ &port_regs->mac1MaxFrameLengthReg,
+ qdev->max_frame_size);
+ else
+ ql_write_page0_reg(qdev,
+ &port_regs->mac0MaxFrameLengthReg,
+ qdev->max_frame_size);
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ status = -1;
+ goto out;
+ }
+
+ PHY_Setup(qdev);
+ ql_init_scan_mode(qdev);
+ ql_get_phy_owner(qdev);
+
+ /* Load the MAC Configuration */
+
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[2] << 24)
+ | (qdev->ndev->dev_addr[3] << 16)
+ | (qdev->ndev->dev_addr[4] << 8)
+ | qdev->ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[0] << 8)
+ | qdev->ndev->dev_addr[1]));
+
+ /* Enable Primary MAC */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
+ MAC_ADDR_INDIRECT_PTR_REG_PE));
+
+ /* Clear Primary and Secondary IP addresses */
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ (qdev->mac_index << 2)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ ((qdev->mac_index << 2) + 1)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+
+ /* Indicate Configuration Complete */
+ ql_write_page0_reg(qdev,
+ &port_regs->portControl,
+ ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
+
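+	/*
+	 * The caller holds hw_lock; it must be dropped around msleep()
+	 * (sleeping under a spinlock is not allowed) and re-taken before
+	 * the registers are touched again.
+	 */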
+ do {
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (value & PORT_STATUS_IC)
+ break;
+ spin_unlock_irq(&qdev->hw_lock);
+ msleep(500);
+ spin_lock_irq(&qdev->hw_lock);
+ } while (--delay);
+
+ if (delay == 0) {
+ netdev_err(qdev->ndev, "Hw Initialization timeout\n");
+ status = -1;
+ goto out;
+ }
+
+ /* Enable Ethernet Function */
+ if (qdev->device_id == QL3032_DEVICE_ID) {
+ value =
+ (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+ QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
+ QL3032_PORT_CONTROL_ET);
+ ql_write_page0_reg(qdev, &port_regs->functionControl,
+ ((value << 16) | value));
+ } else {
+ value =
+ (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+ PORT_CONTROL_HH);
+ ql_write_page0_reg(qdev, &port_regs->portControl,
+ ((value << 16) | value));
+ }
+
+out:
+ return status;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ int status = 0;
+ u16 value;
+ int max_wait_time;
+
+ set_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_DONE, &qdev->flags);
+
+ /*
+ * Issue soft reset to chip.
+ */
+ netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+	/* Wait up to 5 seconds for the soft reset to complete. */
+	netdev_printk(KERN_DEBUG, qdev->ndev,
+		      "Wait up to 5 seconds for reset to complete\n");
+
+ /* Wait until the firmware tells us the Soft Reset is done */
+ max_wait_time = 5;
+ do {
+ value =
+ ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0)
+ break;
+
+ ssleep(1);
+ } while ((--max_wait_time));
+
+ /*
+ * Also, make sure that the Network Reset Interrupt bit has been
+ * cleared after the soft reset has taken place.
+ */
+ value =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "clearing RI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ }
+
+ if (max_wait_time == 0) {
+ /* Issue Force Soft Reset */
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_FSR << 16) |
+ ISP_CONTROL_FSR));
+ /*
+ * Wait until the firmware tells us the Force Soft Reset is
+ * done
+ */
+ max_wait_time = 5;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_FSR) == 0)
+ break;
+ ssleep(1);
+ } while ((--max_wait_time));
+ }
+ if (max_wait_time == 0)
+ status = 1;
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ set_bit(QL_RESET_DONE, &qdev->flags);
+ return status;
+}
+
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value, port_status;
+ u8 func_number;
+
+ /* Get the function number */
+ value =
+ ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+ func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+ port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ switch (value & ISP_CONTROL_FN_MASK) {
+ case ISP_CONTROL_FN0_NET:
+ qdev->mac_index = 0;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+ qdev->PHYAddr = PORT0_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM0)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN1_NET:
+ qdev->mac_index = 1;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+ qdev->PHYAddr = PORT1_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM1)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN0_SCSI:
+ case ISP_CONTROL_FN1_SCSI:
+ default:
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Invalid function number, ispControlStatus = 0x%x\n",
+ value);
+ break;
+ }
+ qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct pci_dev *pdev = qdev->pdev;
+
+ netdev_info(ndev,
+ "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
+ DRV_NAME, qdev->index, qdev->chip_rev_id,
+ qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
+ qdev->pci_slot);
+ netdev_info(ndev, "%s Interface\n",
+ test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
+
+ /*
+ * Print PCI bus width/type.
+ */
+ netdev_info(ndev, "Bus interface is %s %s\n",
+ ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+ ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+ netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
+ qdev->mem_map_registers);
+ netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
+
+ netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+ struct net_device *ndev = qdev->ndev;
+ int retval = 0;
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ ql_disable_interrupts(qdev);
+
+ free_irq(qdev->pdev->irq, ndev);
+
+ if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+
+ del_timer_sync(&qdev->adapter_timer);
+
+ napi_disable(&qdev->napi);
+
+ if (do_reset) {
+ int soft_reset;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_wait_for_drvr_lock(qdev)) {
+ soft_reset = ql_adapter_reset(qdev);
+ if (soft_reset) {
+ netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
+ qdev->index);
+ }
+ netdev_err(ndev,
+ "Releasing driver lock via chip reset\n");
+ } else {
+ netdev_err(ndev,
+ "Could not acquire driver lock to do reset!\n");
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ }
+ ql_free_mem_resources(qdev);
+ return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+ struct net_device *ndev = qdev->ndev;
+ int err;
+ unsigned long irq_flags = IRQF_SHARED;
+ unsigned long hw_flags;
+
+ if (ql_alloc_mem_resources(qdev)) {
+ netdev_err(ndev, "Unable to allocate buffers\n");
+ return -ENOMEM;
+ }
+
+ if (qdev->msi) {
+ if (pci_enable_msi(qdev->pdev)) {
+ netdev_err(ndev,
+ "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
+ qdev->msi = 0;
+ } else {
+ netdev_info(ndev, "MSI Enabled...\n");
+ set_bit(QL_MSI_ENABLED, &qdev->flags);
+ irq_flags &= ~IRQF_SHARED;
+ }
+ }
+
+ err = request_irq(qdev->pdev->irq, ql3xxx_isr,
+ irq_flags, ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev,
+ "Failed to reserve interrupt %d - already in use\n",
+ qdev->pdev->irq);
+ goto err_irq;
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ err = ql_wait_for_drvr_lock(qdev);
+ if (err) {
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netdev_err(ndev, "Unable to initialize adapter\n");
+ goto err_init;
+ }
+ netdev_err(ndev, "Releasing driver lock\n");
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+ } else {
+		netdev_err(ndev, "Could not acquire driver lock\n");
+		err = -EAGAIN;	/* lock attempt timed out; don't return 0 */
+		goto err_lock;
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ napi_enable(&qdev->napi);
+ ql_enable_interrupts(qdev);
+ return 0;
+
+err_init:
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ free_irq(qdev->pdev->irq, ndev);
+err_irq:
+ if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ netdev_info(ndev, "calling pci_disable_msi()\n");
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+ return err;
+}
+
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+ if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+ netdev_err(qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
+ rtnl_lock();
+ dev_close(qdev->ndev);
+ rtnl_unlock();
+ return -1;
+ }
+ return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ /*
+ * Wait for device to recover from a reset.
+ * (Rarely happens, but possible.)
+ */
+ while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+ msleep(50);
+
+ ql_adapter_down(qdev, QL_DO_RESET);
+ return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return ql_adapter_up(qdev);
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct sockaddr *addr = p;
+ unsigned long hw_flags;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[2] << 24) | (ndev->
+ dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ netdev_err(ndev, "Resetting...\n");
+ /*
+ * Stop the queues, we've got a problem.
+ */
+ netif_stop_queue(ndev);
+
+ /*
+ * Wake up the worker to process this event.
+ */
+ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
+}
+
+static void ql_reset_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, reset_work.work);
+ struct net_device *ndev = qdev->ndev;
+ u32 value;
+ struct ql_tx_buf_cb *tx_cb;
+ int max_wait_time, i;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+	/* These flags are bit numbers, not masks; test each individually. */
+	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+	    test_bit(QL_RESET_START, &qdev->flags)) {
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ /*
+ * Loop through the active list and return the skb.
+ */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ int j;
+ tx_cb = &qdev->tx_buf[i];
+ if (tx_cb->skb) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "Freeing lost SKB\n");
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+ for (j = 1; j < tx_cb->seg_count; j++) {
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[j],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[j],
+ maplen),
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(tx_cb->skb);
+ tx_cb->skb = NULL;
+ }
+ }
+
+ netdev_err(ndev, "Clearing NRI after reset\n");
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+		/*
+		 * Wait for the soft reset to complete.
+		 */
+ max_wait_time = 10;
+ do {
+			value = ql_read_common_reg(qdev,
+						   &port_regs->CommonRegs.
+						   ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "reset completed\n");
+ break;
+ }
+
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "clearing NRI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->
+ CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI <<
+ 16) | ISP_CONTROL_RI));
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ } while (--max_wait_time);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ if (value & ISP_CONTROL_SR) {
+
+ /*
+ * Set the reset flags and clear the board again.
+ * Nothing else to do...
+ */
+ netdev_err(ndev,
+ "Timed out waiting for reset to complete\n");
+ netdev_err(ndev, "Do a reset\n");
+ clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ clear_bit(QL_RESET_START, &qdev->flags);
+ ql_cycle_adapter(qdev, QL_DO_RESET);
+ return;
+ }
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ clear_bit(QL_RESET_START, &qdev->flags);
+ ql_cycle_adapter(qdev, QL_NO_RESET);
+ }
+}
+
+static void ql_tx_timeout_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+ ql_cycle_adapter(qdev, QL_DO_RESET);
+}
+
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+ qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+ if (value & PORT_STATUS_64)
+ qdev->pci_width = 64;
+ else
+ qdev->pci_width = 32;
+ if (value & PORT_STATUS_X)
+ qdev->pci_x = 1;
+ else
+ qdev->pci_x = 0;
+ qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
+static void ql3xxx_timer(unsigned long ptr)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+ queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
+}
+
+static const struct net_device_ops ql3xxx_netdev_ops = {
+ .ndo_open = ql3xxx_open,
+ .ndo_start_xmit = ql3xxx_send,
+ .ndo_stop = ql3xxx_close,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ql3xxx_set_mac_address,
+ .ndo_tx_timeout = ql3xxx_tx_timeout,
+};
+
+static int ql3xxx_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
+{
+ struct net_device *ndev = NULL;
+ struct ql3_adapter *qdev = NULL;
+ static int cards_found;
+ int uninitialized_var(pci_using_dac), err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("%s cannot enable PCI device\n", pci_name(pdev));
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ pci_using_dac = 0;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ if (err) {
+ pr_err("%s no usable DMA configuration\n", pci_name(pdev));
+ goto err_out_free_regions;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_out_free_regions;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ pci_set_drvdata(pdev, ndev);
+
+ qdev = netdev_priv(ndev);
+ qdev->index = cards_found;
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ qdev->device_id = pci_entry->device;
+ qdev->port_link_state = LS_DOWN;
+ if (msi)
+ qdev->msi = 1;
+
+ qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+ if (pci_using_dac)
+ ndev->features |= NETIF_F_HIGHDMA;
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+
+ qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
+ if (!qdev->mem_map_registers) {
+ pr_err("%s: cannot map device registers\n", pci_name(pdev));
+ err = -EIO;
+ goto err_out_free_ndev;
+ }
+
+ spin_lock_init(&qdev->adapter_lock);
+ spin_lock_init(&qdev->hw_lock);
+
+ /* Set driver entry points */
+ ndev->netdev_ops = &ql3xxx_netdev_ops;
+ ndev->ethtool_ops = &ql3xxx_ethtool_ops;
+ ndev->watchdog_timeo = 5 * HZ;
+
+ netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
+
+ ndev->irq = pdev->irq;
+
+ /* make sure the EEPROM is good */
+ if (ql_get_nvram_params(qdev)) {
+ pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
+ __func__, qdev->index);
+ err = -EIO;
+ goto err_out_iounmap;
+ }
+
+ ql_set_mac_info(qdev);
+
+ /* Validate and set parameters */
+ if (qdev->mac_index) {
+		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
+ ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
+ } else {
+		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
+ ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
+ }
+
+ ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+ /* Record PCI bus information. */
+ ql_get_board_info(qdev);
+
+ /*
+ * Set the Maximum Memory Read Byte Count value. We do this to handle
+ * jumbo frames.
+ */
+ if (qdev->pci_x)
+ pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
+
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("%s: cannot register net device\n", pci_name(pdev));
+ goto err_out_iounmap;
+ }
+
+ /* we're going to reset, so assume we have no link for now */
+
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+
+	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	if (!qdev->workqueue) {
+		unregister_netdev(ndev);
+		err = -ENOMEM;
+		goto err_out_iounmap;
+	}
+ INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+ INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
+ INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
+
+ init_timer(&qdev->adapter_timer);
+ qdev->adapter_timer.function = ql3xxx_timer;
+ qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
+ qdev->adapter_timer.data = (unsigned long)qdev;
+
+ if (!cards_found) {
+ pr_alert("%s\n", DRV_STRING);
+ pr_alert("Driver name: %s, Version: %s\n",
+ DRV_NAME, DRV_VERSION);
+ }
+ ql_display_dev_info(ndev);
+
+ cards_found++;
+ return 0;
+
+err_out_iounmap:
+ iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+ free_netdev(ndev);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+err_out:
+ return err;
+}
+
+static void ql3xxx_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ ql_disable_interrupts(qdev);
+
+ if (qdev->workqueue) {
+ cancel_delayed_work(&qdev->reset_work);
+ cancel_delayed_work(&qdev->tx_timeout_work);
+ destroy_workqueue(qdev->workqueue);
+ qdev->workqueue = NULL;
+ }
+
+ iounmap(qdev->mem_map_registers);
+ pci_release_regions(pdev);
+ free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+ .name = DRV_NAME,
+ .id_table = ql3xxx_pci_tbl,
+ .probe = ql3xxx_probe,
+ .remove = ql3xxx_remove,
+};
+
+module_pci_driver(ql3xxx_driver);
diff --git a/kernel/drivers/net/ethernet/qlogic/qla3xxx.h b/kernel/drivers/net/ethernet/qlogic/qla3xxx.h
new file mode 100644
index 000000000..73e234366
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qla3xxx.h
@@ -0,0 +1,1189 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+#ifndef _QLA3XXX_H_
+#define _QLA3XXX_H_
+
+/*
+ * IOCB Definitions...
+ */
+#pragma pack(1)
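+/*
+ * These IOCBs are DMA-shared with the chip, so they are packed to match
+ * the hardware layout exactly, with no compiler-inserted padding.
+ */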
+
+#define OPCODE_OB_MAC_IOCB_FN0 0x01
+#define OPCODE_OB_MAC_IOCB_FN2 0x21
+
+#define OPCODE_IB_MAC_IOCB 0xF9
+#define OPCODE_IB_3032_MAC_IOCB 0x09
+#define OPCODE_IB_IP_IOCB 0xFA
+#define OPCODE_IB_3032_IP_IOCB 0x0A
+
+#define OPCODE_FUNC_ID_MASK 0x30
+#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
+
+#define FN0_MA_BITS_MASK 0x00
+#define FN1_MA_BITS_MASK 0x80
+
+struct ob_mac_iocb_req {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_REQ_MA 0xe0
+#define OB_MAC_IOCB_REQ_F 0x10
+#define OB_MAC_IOCB_REQ_X 0x08
+#define OB_MAC_IOCB_REQ_D 0x02
+#define OB_MAC_IOCB_REQ_I 0x01
+ u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC 0x04
+#define OB_3032MAC_IOCB_REQ_TC 0x02
+#define OB_3032MAC_IOCB_REQ_UC 0x01
+ u8 reserved0;
+
+ u32 transaction_id; /* opaque for hardware */
+ __le16 data_len;
+ u8 ip_hdr_off;
+ u8 ip_hdr_len;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved3;
+ __le32 reserved4;
+};
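+/*
+ * When packed, ob_mac_iocb_req works out to exactly 64 bytes -- the unit
+ * in which NUM_REQ_Q_ENTRIES (defined later in this header) sizes the
+ * request ring.
+ */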
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
+#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
+#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
+#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
+
+struct ob_mac_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_P 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ u32 transaction_id; /* opaque for hardware */
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_mac_iocb_rsp {
+ u8 opcode;
+#define IB_MAC_IOCB_RSP_V 0x80
+ u8 flags;
+#define IB_MAC_IOCB_RSP_S 0x80
+#define IB_MAC_IOCB_RSP_H1 0x40
+#define IB_MAC_IOCB_RSP_H0 0x20
+#define IB_MAC_IOCB_RSP_B 0x10
+#define IB_MAC_IOCB_RSP_M 0x08
+#define IB_MAC_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le32 reserved;
+ __le32 ial_low;
+ __le32 ial_high;
+
+};
+
+struct ob_ip_iocb_req {
+ u8 opcode;
+ __le16 flags;
+#define OB_IP_IOCB_REQ_O 0x100
+#define OB_IP_IOCB_REQ_H 0x008
+#define OB_IP_IOCB_REQ_U 0x004
+#define OB_IP_IOCB_REQ_D 0x002
+#define OB_IP_IOCB_REQ_I 0x001
+
+ u8 reserved0;
+
+ __le32 transaction_id;
+ __le16 data_len;
+ __le16 reserved1;
+ __le32 hncb_ptr_low;
+ __le32 hncb_ptr_high;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved2;
+ __le32 reserved3;
+};
+
+/* defines for BufferLength fields above */
+#define OB_IP_IOCB_REQ_E 0x80000000
+#define OB_IP_IOCB_REQ_C 0x40000000
+#define OB_IP_IOCB_REQ_L 0x20000000
+#define OB_IP_IOCB_REQ_R 0x10000000
+
+struct ob_ip_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_H 0x10
+#define OB_MAC_IOCB_RSP_E 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ __le32 transaction_id;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_ip_iocb_rsp {
+ u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V 0x80
+#define IB_IP_IOCB_RSP_3032_O 0x40
+#define IB_IP_IOCB_RSP_3032_I 0x20
+#define IB_IP_IOCB_RSP_3032_R 0x10
+ u8 flags;
+#define IB_IP_IOCB_RSP_S 0x80
+#define IB_IP_IOCB_RSP_H1 0x40
+#define IB_IP_IOCB_RSP_H0 0x20
+#define IB_IP_IOCB_RSP_B 0x10
+#define IB_IP_IOCB_RSP_M 0x08
+#define IB_IP_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE 0x01
+#define IB_IP_IOCB_RSP_3032_CE 0x02
+#define IB_IP_IOCB_RSP_3032_NUC 0x04
+#define IB_IP_IOCB_RSP_3032_UDP 0x08
+#define IB_IP_IOCB_RSP_3032_TCP 0x10
+#define IB_IP_IOCB_RSP_3032_IPE 0x20
+ __le16 reserved;
+#define IB_IP_IOCB_RSP_R 0x01
+ __le32 ial_low;
+ __le32 ial_high;
+};
+
+struct net_rsp_iocb {
+ u8 opcode;
+ u8 flags;
+ __le16 reserved0;
+ __le32 reserved[3];
+};
+#pragma pack()
+
+/*
+ * Register Definitions...
+ */
+#define PORT0_PHY_ADDRESS 0x1e00
+#define PORT1_PHY_ADDRESS 0x1f00
+
+#define ETHERNET_CRC_SIZE 4
+
+#define MII_SCAN_REGISTER 0x00000001
+
+#define PHY_ID_0_REG 2
+#define PHY_ID_1_REG 3
+
+#define PHY_OUI_1_MASK 0xfc00
+#define PHY_MODEL_MASK 0x03f0
+
+/* Address for the Agere Phy */
+#define MII_AGERE_ADDR_1 0x00001000
+#define MII_AGERE_ADDR_2 0x00001100
+
+/* 32-bit ispControlStatus */
+enum {
+ ISP_CONTROL_NP_MASK = 0x0003,
+ ISP_CONTROL_NP_PCSR = 0x0000,
+ ISP_CONTROL_NP_HMCR = 0x0001,
+ ISP_CONTROL_NP_LRAMCR = 0x0002,
+ ISP_CONTROL_NP_PSR = 0x0003,
+ ISP_CONTROL_RI = 0x0008,
+ ISP_CONTROL_CI = 0x0010,
+ ISP_CONTROL_PI = 0x0020,
+ ISP_CONTROL_IN = 0x0040,
+ ISP_CONTROL_BE = 0x0080,
+ ISP_CONTROL_FN_MASK = 0x0700,
+ ISP_CONTROL_FN0_NET = 0x0400,
+ ISP_CONTROL_FN0_SCSI = 0x0500,
+ ISP_CONTROL_FN1_NET = 0x0600,
+ ISP_CONTROL_FN1_SCSI = 0x0700,
+ ISP_CONTROL_LINK_DN_0 = 0x0800,
+ ISP_CONTROL_LINK_DN_1 = 0x1000,
+ ISP_CONTROL_FSR = 0x2000,
+ ISP_CONTROL_FE = 0x4000,
+ ISP_CONTROL_SR = 0x8000,
+};
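+/*
+ * The NP field above selects the active register page, matching the page
+ * structs later in this header: PCSR = port control/status (page 0),
+ * HMCR = host memory config (page 1), LRAMCR = local RAM (page 2), and
+ * PSR = probe status (page 3).
+ */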
+
+/* 32-bit ispInterruptMaskReg */
+enum {
+ ISP_IMR_ENABLE_INT = 0x0004,
+ ISP_IMR_DISABLE_RESET_INT = 0x0008,
+ ISP_IMR_DISABLE_CMPL_INT = 0x0010,
+ ISP_IMR_DISABLE_PROC_INT = 0x0020,
+};
+
+/* 32-bit serialPortInterfaceReg */
+enum {
+ ISP_SERIAL_PORT_IF_CLK = 0x0001,
+ ISP_SERIAL_PORT_IF_CS = 0x0002,
+ ISP_SERIAL_PORT_IF_D0 = 0x0004,
+ ISP_SERIAL_PORT_IF_DI = 0x0008,
+ ISP_NVRAM_MASK = (0x000F << 16),
+ ISP_SERIAL_PORT_IF_WE = 0x0010,
+ ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
+ ISP_SERIAL_PORT_IF_SCI = 0x0400,
+ ISP_SERIAL_PORT_IF_SC0 = 0x0800,
+ ISP_SERIAL_PORT_IF_SCE = 0x1000,
+ ISP_SERIAL_PORT_IF_SDI = 0x2000,
+ ISP_SERIAL_PORT_IF_SDO = 0x4000,
+ ISP_SERIAL_PORT_IF_SDE = 0x8000,
+ ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
+};
+
+/* semaphoreReg */
+enum {
+ QL_RESOURCE_MASK_BASE_CODE = 0x7,
+ QL_RESOURCE_BITS_BASE_CODE = 0x4,
+ QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
+ QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
+ QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
+ QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
+ QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
+ QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
+ QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
+ QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
+ QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
+ QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
+};
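+/*
+ * Semaphore acquisition follows the same upper-16-bit write-mask
+ * convention: the lock helpers in qla3xxx.c write (mask | bits), read
+ * the register back, and own the resource only if their code stuck.
+ */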
+
+/*
+ * QL3XXX memory-mapped registers
+ * QL3XXX has 4 "pages" of registers, each page occupying
+ * 256 bytes.  Each page has a "common" area at the start and then
+ * page-specific registers after that.
+ */
+struct ql3xxx_common_registers {
+ u32 MB0; /* Offset 0x00 */
+ u32 MB1; /* Offset 0x04 */
+ u32 MB2; /* Offset 0x08 */
+ u32 MB3; /* Offset 0x0c */
+ u32 MB4; /* Offset 0x10 */
+ u32 MB5; /* Offset 0x14 */
+ u32 MB6; /* Offset 0x18 */
+ u32 MB7; /* Offset 0x1c */
+ u32 flashBiosAddr;
+ u32 flashBiosData;
+ u32 ispControlStatus;
+ u32 ispInterruptMaskReg;
+ u32 serialPortInterfaceReg;
+ u32 semaphoreReg;
+ u32 reqQProducerIndex;
+ u32 rspQConsumerIndex;
+
+ u32 rxLargeQProducerIndex;
+ u32 rxSmallQProducerIndex;
+ u32 arcMadiCommand;
+ u32 arcMadiData;
+};
+
+enum {
+ EXT_HW_CONFIG_SP_MASK = 0x0006,
+ EXT_HW_CONFIG_SP_NONE = 0x0000,
+ EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+ EXT_HW_CONFIG_SP_ECC = 0x0004,
+ EXT_HW_CONFIG_SP_ECCx = 0x0006,
+ EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+ EXT_HW_CONFIG_SIZE_128M = 0x0000,
+ EXT_HW_CONFIG_SIZE_256M = 0x0020,
+ EXT_HW_CONFIG_SIZE_512M = 0x0040,
+ EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+ EXT_HW_CONFIG_PD = 0x0080,
+ EXT_HW_CONFIG_FW = 0x0200,
+ EXT_HW_CONFIG_US = 0x0400,
+ EXT_HW_CONFIG_DCS_MASK = 0x1800,
+ EXT_HW_CONFIG_DCS_9MA = 0x0000,
+ EXT_HW_CONFIG_DCS_15MA = 0x0800,
+ EXT_HW_CONFIG_DCS_18MA = 0x1000,
+ EXT_HW_CONFIG_DCS_24MA = 0x1800,
+ EXT_HW_CONFIG_DDS_MASK = 0x6000,
+ EXT_HW_CONFIG_DDS_9MA = 0x0000,
+ EXT_HW_CONFIG_DDS_15MA = 0x2000,
+ EXT_HW_CONFIG_DDS_18MA = 0x4000,
+ EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+ INTERNAL_CHIP_DM = 0x0001,
+ INTERNAL_CHIP_SD = 0x0002,
+ INTERNAL_CHIP_RAP_MASK = 0x000C,
+ INTERNAL_CHIP_RAP_RR = 0x0000,
+ INTERNAL_CHIP_RAP_NRM = 0x0004,
+ INTERNAL_CHIP_RAP_ERM = 0x0008,
+ INTERNAL_CHIP_RAP_ERMx = 0x000C,
+ INTERNAL_CHIP_WE = 0x0010,
+ INTERNAL_CHIP_EF = 0x0020,
+ INTERNAL_CHIP_FR = 0x0040,
+ INTERNAL_CHIP_FW = 0x0080,
+ INTERNAL_CHIP_FI = 0x0100,
+ INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+ PORT_CONTROL_DS = 0x0001,
+ PORT_CONTROL_HH = 0x0002,
+ PORT_CONTROL_EI = 0x0004,
+ PORT_CONTROL_ET = 0x0008,
+ PORT_CONTROL_EF = 0x0010,
+ PORT_CONTROL_DRM = 0x0020,
+ PORT_CONTROL_RLB = 0x0040,
+ PORT_CONTROL_RCB = 0x0080,
+ PORT_CONTROL_MAC = 0x0100,
+ PORT_CONTROL_IPV = 0x0200,
+ PORT_CONTROL_IFP = 0x0400,
+ PORT_CONTROL_ITP = 0x0800,
+ PORT_CONTROL_FI = 0x1000,
+ PORT_CONTROL_DFP = 0x2000,
+ PORT_CONTROL_OI = 0x4000,
+ PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+ PORT_STATUS_SM0 = 0x0001,
+ PORT_STATUS_SM1 = 0x0002,
+ PORT_STATUS_X = 0x0008,
+ PORT_STATUS_DL = 0x0080,
+ PORT_STATUS_IC = 0x0200,
+ PORT_STATUS_MRC = 0x0400,
+ PORT_STATUS_NL = 0x0800,
+ PORT_STATUS_REV_ID_MASK = 0x7000,
+ PORT_STATUS_REV_ID_1 = 0x1000,
+ PORT_STATUS_REV_ID_2 = 0x2000,
+ PORT_STATUS_REV_ID_3 = 0x3000,
+ PORT_STATUS_64 = 0x8000,
+ PORT_STATUS_UP0 = 0x10000,
+ PORT_STATUS_AC0 = 0x20000,
+ PORT_STATUS_AE0 = 0x40000,
+ PORT_STATUS_UP1 = 0x100000,
+ PORT_STATUS_AC1 = 0x200000,
+ PORT_STATUS_AE1 = 0x400000,
+ PORT_STATUS_F0_ENABLED = 0x1000000,
+ PORT_STATUS_F1_ENABLED = 0x2000000,
+ PORT_STATUS_F2_ENABLED = 0x4000000,
+ PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macAddrIndirectPtrReg */
+enum {
+ MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+ MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+ MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+ MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+ MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+ MAC_MII_CONTROL_RC = 0x0001,
+ MAC_MII_CONTROL_SC = 0x0002,
+ MAC_MII_CONTROL_AS = 0x0004,
+ MAC_MII_CONTROL_NP = 0x0008,
+ MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
+ MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
+ MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
+ MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
+ MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
+ MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
+ MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
+ MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
+ MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
+ MAC_MII_CONTROL_RM = 0x8000,
+};
+
+/* macMIIStatusReg */
+enum {
+ MAC_MII_STATUS_BSY = 0x0001,
+ MAC_MII_STATUS_SC = 0x0002,
+ MAC_MII_STATUS_NV = 0x0004,
+};
+
+enum {
+ MAC_CONFIG_REG_PE = 0x0001,
+ MAC_CONFIG_REG_TF = 0x0002,
+ MAC_CONFIG_REG_RF = 0x0004,
+ MAC_CONFIG_REG_FD = 0x0008,
+ MAC_CONFIG_REG_GM = 0x0010,
+ MAC_CONFIG_REG_LB = 0x0020,
+ MAC_CONFIG_REG_SR = 0x8000,
+};
+
+enum {
+ MAC_HALF_DUPLEX_REG_ED = 0x10000,
+ MAC_HALF_DUPLEX_REG_NB = 0x20000,
+ MAC_HALF_DUPLEX_REG_BNB = 0x40000,
+ MAC_HALF_DUPLEX_REG_ALT = 0x80000,
+};
+
+enum {
+ IP_ADDR_INDEX_REG_MASK = 0x000f,
+ IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
+ IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
+ IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
+ IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
+ IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
+ IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
+ IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
+ IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+ IP_ADDR_INDEX_REG_6 = 0x0008,
+ IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+ IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+ QL3032_PORT_CONTROL_DS = 0x0001,
+ QL3032_PORT_CONTROL_HH = 0x0002,
+ QL3032_PORT_CONTROL_EIv6 = 0x0004,
+ QL3032_PORT_CONTROL_EIv4 = 0x0008,
+ QL3032_PORT_CONTROL_ET = 0x0010,
+ QL3032_PORT_CONTROL_EF = 0x0020,
+ QL3032_PORT_CONTROL_DRM = 0x0040,
+ QL3032_PORT_CONTROL_RLB = 0x0080,
+ QL3032_PORT_CONTROL_RCB = 0x0100,
+ QL3032_PORT_CONTROL_KIE = 0x0200,
+};
+
+enum {
+ PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
+ PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
+ PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
+ PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
+ PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
+ PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
+ PROBE_MUX_ADDR_REG_UP = 0x4000,
+ PROBE_MUX_ADDR_REG_RE = 0x8000,
+};
+
+enum {
+ STATISTICS_INDEX_REG_MASK = 0x01ff,
+ STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
+ STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
+ STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
+ STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
+ STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
+ STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
+ STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
+ STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
+ STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
+ STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
+ STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
+ STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
+ STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
+ STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
+ STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
+ STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
+ STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
+ STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
+ STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
+ STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
+ STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
+ STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
+ STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
+ STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
+ STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
+ STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
+ STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
+ STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
+};
+
+enum {
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
+ PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
+ PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
+ PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
+ PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
+ PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
+ PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
+ PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
+ PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
+ PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
+ PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
+ PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
+ PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
+ PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
+ PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
+ PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
+ PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
+ PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
+ PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
+ PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
+ PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
+ PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
+};
+
+/*
+ * port control and status page - page 0
+ */
+
+struct ql3xxx_port_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 ExternalHWConfig;
+ u32 InternalChipConfig;
+ u32 portControl;
+ u32 portStatus;
+ u32 macAddrIndirectPtrReg;
+ u32 macAddrDataReg;
+ u32 macMIIMgmtControlReg;
+ u32 macMIIMgmtAddrReg;
+ u32 macMIIMgmtDataReg;
+ u32 macMIIStatusReg;
+ u32 mac0ConfigReg;
+ u32 mac0IpgIfgReg;
+ u32 mac0HalfDuplexReg;
+ u32 mac0MaxFrameLengthReg;
+ u32 mac0PauseThresholdReg;
+ u32 mac1ConfigReg;
+ u32 mac1IpgIfgReg;
+ u32 mac1HalfDuplexReg;
+ u32 mac1MaxFrameLengthReg;
+ u32 mac1PauseThresholdReg;
+ u32 ipAddrIndexReg;
+ u32 ipAddrDataReg;
+ u32 ipReassemblyTimeout;
+ u32 tcpMaxWindow;
+ u32 currentTcpTimestamp[2];
+ u32 internalRamRWAddrReg;
+ u32 internalRamWDataReg;
+ u32 reclaimedBufferAddrRegLow;
+ u32 reclaimedBufferAddrRegHigh;
+ u32 tcpConfiguration;
+ u32 functionControl;
+ u32 fpgaRevID;
+ u32 localRamAddr;
+ u32 localRamDataAutoIncr;
+ u32 localRamDataNonIncr;
+ u32 gpOutput;
+ u32 gpInput;
+ u32 probeMuxAddr;
+ u32 probeMuxData;
+ u32 statisticsIndexReg;
+ u32 statisticsReadDataRegAutoIncr;
+ u32 statisticsReadDataRegNoIncr;
+ u32 PortFatalErrStatus;
+};
+
+/*
+ * port host memory config page - page 1
+ */
+struct ql3xxx_host_memory_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 reserved[12];
+
+ /* Network Request Queue */
+ u32 reqConsumerIndex;
+ u32 reqConsumerIndexAddrLow;
+ u32 reqConsumerIndexAddrHigh;
+ u32 reqBaseAddrLow;
+ u32 reqBaseAddrHigh;
+ u32 reqLength;
+
+ /* Network Completion Queue */
+ u32 rspProducerIndex;
+ u32 rspProducerIndexAddrLow;
+ u32 rspProducerIndexAddrHigh;
+ u32 rspBaseAddrLow;
+ u32 rspBaseAddrHigh;
+ u32 rspLength;
+
+ /* RX Large Buffer Queue */
+ u32 rxLargeQConsumerIndex;
+ u32 rxLargeQBaseAddrLow;
+ u32 rxLargeQBaseAddrHigh;
+ u32 rxLargeQLength;
+ u32 rxLargeBufferLength;
+
+ /* RX Small Buffer Queue */
+ u32 rxSmallQConsumerIndex;
+ u32 rxSmallQBaseAddrLow;
+ u32 rxSmallQBaseAddrHigh;
+ u32 rxSmallQLength;
+ u32 rxSmallBufferLength;
+
+};
+
+/*
+ * port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+ struct ql3xxx_common_registers CommonRegs;
+ u32 bufletSize;
+ u32 maxBufletCount;
+ u32 currentBufletCount;
+ u32 reserved;
+ u32 freeBufletThresholdLow;
+ u32 freeBufletThresholdHigh;
+ u32 ipHashTableBase;
+ u32 ipHashTableCount;
+ u32 tcpHashTableBase;
+ u32 tcpHashTableCount;
+ u32 ncbBase;
+ u32 maxNcbCount;
+ u32 currentNcbCount;
+ u32 drbBase;
+ u32 maxDrbCount;
+ u32 currentDrbCount;
+};
+
+/*
+ * LS_64BITS/MS_64BITS split a 64-bit (typically DMA) address into its
+ * low and high 32-bit halves.
+ */
+
+#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x))
+#define MS_64BITS(x)  (u32)(0xffffffff & (((u64)x) >> 16 >> 16))
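+/*
+ * The ">> 16 >> 16" idiom (rather than ">> 32") appears to side-step
+ * shift-count warnings some compilers emit on configurations where
+ * dma_addr_t is only 32 bits wide.
+ */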
+
+/*
+ * PHY (MII-accessible) register definitions
+ */
+
+enum {
+ CONTROL_REG = 0,
+ STATUS_REG = 1,
+ PHY_STAT_LINK_UP = 0x0004,
+ PHY_CTRL_LOOPBACK = 0x4000,
+
+ PETBI_CONTROL_REG = 0x00,
+ PETBI_CTRL_ALL_PARAMS = 0x7140,
+ PETBI_CTRL_SOFT_RESET = 0x8000,
+ PETBI_CTRL_AUTO_NEG = 0x1000,
+ PETBI_CTRL_RESTART_NEG = 0x0200,
+ PETBI_CTRL_FULL_DUPLEX = 0x0100,
+ PETBI_CTRL_SPEED_1000 = 0x0040,
+
+ PETBI_STATUS_REG = 0x01,
+ PETBI_STAT_NEG_DONE = 0x0020,
+ PETBI_STAT_LINK_UP = 0x0004,
+
+ PETBI_NEG_ADVER = 0x04,
+ PETBI_NEG_PAUSE = 0x0080,
+ PETBI_NEG_PAUSE_MASK = 0x0180,
+ PETBI_NEG_DUPLEX = 0x0020,
+ PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+ PETBI_NEG_PARTNER = 0x05,
+ PETBI_NEG_ERROR_MASK = 0x3000,
+
+ PETBI_EXPANSION_REG = 0x06,
+ PETBI_EXP_PAGE_RX = 0x0002,
+
+ PHY_GIG_CONTROL = 9,
+ PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/
+ PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/
+ PHY_GIG_ALL_PARAMS = 0x0300,
+ PHY_GIG_ADV_1000F = 0x0200,
+ PHY_GIG_ADV_1000H = 0x0100,
+
+ PHY_NEG_ADVER = 4,
+ PHY_NEG_ALL_PARAMS = 0x0fe0,
+ PHY_NEG_ASY_PAUSE = 0x0800,
+ PHY_NEG_SYM_PAUSE = 0x0400,
+ PHY_NEG_ADV_SPEED = 0x01e0,
+ PHY_NEG_ADV_100F = 0x0100,
+ PHY_NEG_ADV_100H = 0x0080,
+ PHY_NEG_ADV_10F = 0x0040,
+ PHY_NEG_ADV_10H = 0x0020,
+
+ PETBI_TBI_CTRL = 0x11,
+ PETBI_TBI_RESET = 0x8000,
+ PETBI_TBI_AUTO_SENSE = 0x0100,
+ PETBI_TBI_SERDES_MODE = 0x0010,
+ PETBI_TBI_SERDES_WRAP = 0x0002,
+
+ AUX_CONTROL_STATUS = 0x1c,
+ PHY_AUX_NEG_DONE = 0x8000,
+ PHY_NEG_PARTNER = 5,
+ PHY_AUX_DUPLEX_STAT = 0x0020,
+ PHY_AUX_SPEED_STAT = 0x0018,
+ PHY_AUX_NO_HW_STRAP = 0x0004,
+ PHY_AUX_RESET_STICK = 0x0002,
+ PHY_NEG_PAUSE = 0x0400,
+ PHY_CTRL_SOFT_RESET = 0x8000,
+ PHY_CTRL_AUTO_NEG = 0x1000,
+ PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A serial EEPROM definitions */
+ FM93C56A_START = 0x1,
+/* Commands */
+ FM93C56A_READ = 0x2,
+ FM93C56A_WEN = 0x0,
+ FM93C56A_WRITE = 0x1,
+ FM93C56A_WRITE_ALL = 0x0,
+ FM93C56A_WDS = 0x0,
+ FM93C56A_ERASE = 0x3,
+ FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+ FM93C56A_WEN_EXT = 0x3,
+ FM93C56A_WRITE_ALL_EXT = 0x1,
+ FM93C56A_WDS_EXT = 0x0,
+ FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+ FM93C56A_READ_DUMMY_BITS = 1,
+ FM93C56A_READY = 0,
+ FM93C56A_BUSY = 1,
+ FM93C56A_CMD_BITS = 2,
+/* Serial EEPROM device sizes */
+ FM93C56A_SIZE_8 = 0x100,
+ FM93C56A_SIZE_16 = 0x80,
+ FM93C66A_SIZE_8 = 0x200,
+ FM93C66A_SIZE_16 = 0x100,
+ FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+ FM93C56A_NO_ADDR_BITS_16 = 8,
+ FM93C56A_NO_ADDR_BITS_8 = 9,
+ FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+ FM93C56A_DATA_BITS_16 = 16,
+ FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+ AUBURN_EEPROM_DI = 0x8,
+ AUBURN_EEPROM_DI_0 = 0x0,
+ AUBURN_EEPROM_DI_1 = 0x8,
+ AUBURN_EEPROM_DO = 0x4,
+ AUBURN_EEPROM_DO_0 = 0x0,
+ AUBURN_EEPROM_DO_1 = 0x4,
+ AUBURN_EEPROM_CS = 0x2,
+ AUBURN_EEPROM_CS_0 = 0x0,
+ AUBURN_EEPROM_CS_1 = 0x2,
+ AUBURN_EEPROM_CLK_RISE = 0x1,
+ AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {
+	EEPROM_SIZE = FM93C86A_SIZE_16,
+	EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+	EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
+
+/*
+ * MAC Config data structure
+ */
+struct eeprom_port_cfg {
+ u16 etherMtu_mac;
+ u16 pauseThreshold_mac;
+ u16 resumeThreshold_mac;
+ u16 portConfiguration;
+#define PORT_CONFIG_DEFAULT 0xf700
+#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
+#define PORT_CONFIG_1000MB_SPEED 0x0400
+#define PORT_CONFIG_100MB_SPEED 0x0200
+#define PORT_CONFIG_10MB_SPEED 0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
+ u16 reserved[12];
+
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+ u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+	u8 bootID0:7, bootID0Valid:1;
+ u8 bootLun0[8];
+
+	u8 bootID1:7, bootID1Valid:1;
+ u8 bootLun1[8];
+
+ u16 MaxLunsTrgt;
+ u8 reserved[10];
+};
+
+/*
+ * Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+ u8 reserved[30];
+ u16 macAddress[3];
+ u16 macAddressSecondary[3];
+
+ u16 subsysVendorId;
+ u16 subsysDeviceId;
+};
+
+/*
+ * EEPROM format
+ */
+struct eeprom_data {
+ u8 asicId[4];
+ u16 version_and_numPorts; /* together to avoid endianness crap */
+ u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE 16
+#define EEPROM_SERIAL_NUM_SIZE 16
+
+ u8 boardIdStr[16];
+ u8 serialNumber[16];
+ u16 extHwConfig;
+ struct eeprom_port_cfg macCfg_port0;
+ struct eeprom_port_cfg macCfg_port1;
+ u16 bufletSize;
+ u16 bufletCount;
+ u16 tcpWindowThreshold50;
+ u16 tcpWindowThreshold25;
+ u16 tcpWindowThreshold0;
+ u16 ipHashTableBaseHi;
+ u16 ipHashTableBaseLo;
+ u16 ipHashTableSize;
+ u16 tcpHashTableBaseHi;
+ u16 tcpHashTableBaseLo;
+ u16 tcpHashTableSize;
+ u16 ncbTableBaseHi;
+ u16 ncbTableBaseLo;
+ u16 ncbTableSize;
+ u16 drbTableBaseHi;
+ u16 drbTableBaseLo;
+ u16 drbTableSize;
+ u16 reserved_142[4];
+ u16 ipReassemblyTimeout;
+ u16 tcpMaxWindowSize;
+ u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+ u8 reserved_156[294];
+ u16 qDebug[8];
+ struct eeprom_function_cfg funcCfg_fn0;
+ u16 reserved_510;
+ u8 oemSpace[432];
+ struct eeprom_bios_cfg biosCfg_fn1;
+ struct eeprom_function_cfg funcCfg_fn1;
+ u16 reserved_1022;
+ u8 reserved_1024[464];
+ struct eeprom_function_cfg funcCfg_fn2;
+ u16 reserved_1534;
+ u8 reserved_1536[432];
+ struct eeprom_bios_cfg biosCfg_fn3;
+ struct eeprom_function_cfg funcCfg_fn3;
+ u16 checksum;
+};
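+/*
+ * The reserved_NNN names (mostly) encode the field's byte offset within
+ * the image; the structure totals exactly 2048 bytes -- the 1024 16-bit
+ * words of the FM93C86A part selected by EEPROM_SIZE above.
+ */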
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are noted as such.
+ */
+
+#define QL3XXX_VENDOR_ID 0x1077
+#define QL3022_DEVICE_ID 0x3022
+#define QL3032_DEVICE_ID 0x3032
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE ETH_DATA_LEN
+#define JUMBO_MTU_SIZE 9000
+#define VLAN_ID_LEN 2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES 256	/* 256 * 64-byte IOCBs = 16384 bytes */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES 128
+#define JUMBO_NUM_LBUFQ_ENTRIES 32
+#define NUM_SBUFQ_ENTRIES 64
+#define QL_SMALL_BUFFER_SIZE 32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+	/* Each send has at least one control block.  This is how many we keep. */
+#define NUM_SMALL_BUFFERS	(NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
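+/*
+ * Arithmetic: struct lrg_buf_q_entry (below) is 64 bytes and struct
+ * bufq_addr_element is 8, so QL_ADDR_ELE_PER_BUFQ_ENTRY = 8 and
+ * NUM_SMALL_BUFFERS = 64 * 8 = 512.
+ */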
+
+#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+
+ __le32 addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK 0x00000003
+ __le32 addr0_upper;
+ __le32 addr1_lower;
+ __le32 addr1_upper;
+ __le32 addr2_lower;
+ __le32 addr2_upper;
+ __le32 addr3_lower;
+ __le32 addr3_upper;
+ __le32 addr4_lower;
+ __le32 addr4_upper;
+ __le32 addr5_lower;
+ __le32 addr5_upper;
+ __le32 addr6_lower;
+ __le32 addr6_upper;
+ __le32 addr7_lower;
+ __le32 addr7_upper;
+
+};
+
+struct bufq_addr_element {
+ __le32 addr_low;
+ __le32 addr_high;
+};
+
+#define QL_NO_RESET 0
+#define QL_DO_RESET 1
+
+enum link_state_t {
+ LS_UNKNOWN = 0,
+ LS_DOWN,
+ LS_DEGRADE,
+ LS_RECOVER,
+ LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+ struct ql_rcv_buf_cb *next;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+ __le32 buf_phy_addr_low;
+ __le32 buf_phy_addr_high;
+ int index;
+};
+
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area
+ * second points to first frag
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 thru 4 point to frags
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
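+/*
+ * Example: with MAX_SKB_FRAGS == 17 (4 KB pages), MAX_OAL_CNT is
+ * (17 - 1) / 4 + 1 = 5 OALs per send.
+ */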
+
+struct oal_entry {
+ __le32 dma_lo;
+ __le32 dma_hi;
+ __le32 len;
+#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
+#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
+};
+
+struct oal {
+ struct oal_entry oal_entry[5];
+};
+
+struct map_list {
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct ql_tx_buf_cb {
+ struct sk_buff *skb;
+	struct ob_mac_iocb_req *queue_entry;
+ int seg_count;
+ struct oal *oal;
+ struct map_list map[MAX_SKB_FRAGS+1];
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB 0x01
+#define QL_BUF_TYPE_IPIOCB 0x02
+#define QL_BUF_TYPE_TCPIOCB 0x03
+
+/* qdev->flags definitions. */
+enum { QL_RESET_DONE = 1, /* Reset finished. */
+ QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
+ QL_RESET_START = 3, /* Please reset the chip. */
+ QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
+ QL_TX_TIMEOUT = 5, /* Timeout in progress. */
+ QL_LINK_MASTER = 6, /* This driver controls the link. */
+ QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
+ QL_THREAD_UP = 8, /* This flag is available. */
+ QL_LINK_UP = 9, /* Link Status. */
+ QL_ALLOC_REQ_RSP_Q_DONE = 10,
+ QL_ALLOC_BUFQS_DONE = 11,
+ QL_ALLOC_SMALL_BUF_DONE = 12,
+ QL_LINK_OPTICAL = 13,
+ QL_MSI_ENABLED = 14,
+};
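+/*
+ * Note: these are bit *numbers* for set_bit()/test_bit() on qdev->flags,
+ * not masks -- they must not be OR-ed together.
+ */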
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+
+struct ql3_adapter {
+ u32 reserved_00;
+ unsigned long flags;
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ struct napi_struct napi;
+
+ /* Hardware information */
+ u8 chip_rev_id;
+ u8 pci_slot;
+ u8 pci_width;
+ u8 pci_x;
+ u32 msi;
+ int index;
+ struct timer_list adapter_timer; /* timer used for various functions */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */
+ struct ql3xxx_port_registers __iomem *mem_map_registers;
+ u32 current_page; /* tracks current register page */
+
+ u32 msg_enable;
+ u8 reserved_01[2];
+ u8 reserved_02[2];
+
+ /* Page for Shadow Registers */
+ void *shadow_reg_virt_addr;
+ dma_addr_t shadow_reg_phy_addr;
+
+ /* Net Request Queue */
+ u32 req_q_size;
+ u32 reserved_03;
+ struct ob_mac_iocb_req *req_q_virt_addr;
+ dma_addr_t req_q_phy_addr;
+ u16 req_producer_index;
+ u16 reserved_04;
+ u16 *preq_consumer_index;
+ u32 req_consumer_index_phy_addr_high;
+ u32 req_consumer_index_phy_addr_low;
+ atomic_t tx_count;
+ struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
+
+ /* Net Response Queue */
+ u32 rsp_q_size;
+ u32 eeprom_cmd_data;
+ struct net_rsp_iocb *rsp_q_virt_addr;
+ dma_addr_t rsp_q_phy_addr;
+ struct net_rsp_iocb *rsp_current;
+ u16 rsp_consumer_index;
+ u16 reserved_06;
+ volatile __le32 *prsp_producer_index;
+ u32 rsp_producer_index_phy_addr_high;
+ u32 rsp_producer_index_phy_addr_low;
+
+ /* Large Buffer Queue */
+ u32 lrg_buf_q_alloc_size;
+ u32 lrg_buf_q_size;
+ void *lrg_buf_q_alloc_virt_addr;
+ void *lrg_buf_q_virt_addr;
+ dma_addr_t lrg_buf_q_alloc_phy_addr;
+ dma_addr_t lrg_buf_q_phy_addr;
+ u32 lrg_buf_q_producer_index;
+ u32 lrg_buf_release_cnt;
+ struct bufq_addr_element *lrg_buf_next_free;
+ u32 num_large_buffers;
+ u32 num_lbufq_entries;
+
+ /* Large (Receive) Buffers */
+ struct ql_rcv_buf_cb *lrg_buf;
+ struct ql_rcv_buf_cb *lrg_buf_free_head;
+ struct ql_rcv_buf_cb *lrg_buf_free_tail;
+ u32 lrg_buf_free_count;
+ u32 lrg_buffer_len;
+ u32 lrg_buf_index;
+ u32 lrg_buf_skb_check;
+
+ /* Small Buffer Queue */
+ u32 small_buf_q_alloc_size;
+ u32 small_buf_q_size;
+ u32 small_buf_q_producer_index;
+ void *small_buf_q_alloc_virt_addr;
+ void *small_buf_q_virt_addr;
+ dma_addr_t small_buf_q_alloc_phy_addr;
+ dma_addr_t small_buf_q_phy_addr;
+ u32 small_buf_index;
+
+ /* Small (Receive) Buffers */
+ void *small_buf_virt_addr;
+ dma_addr_t small_buf_phy_addr;
+ u32 small_buf_phy_addr_low;
+ u32 small_buf_phy_addr_high;
+ u32 small_buf_release_cnt;
+ u32 small_buf_total_size;
+
+ struct eeprom_data nvram_data;
+ u32 port_link_state;
+
+ /* 4022 specific */
+ u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
+ u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
+ u32 mac_ob_opcode; /* Opcode to use on mac transmission */
+ u32 mb_bit_mask; /* MA Bits mask to use on transmission */
+ u32 numPorts;
+ struct workqueue_struct *workqueue;
+ struct delayed_work reset_work;
+ struct delayed_work tx_timeout_work;
+ struct delayed_work link_state_work;
+ u32 max_frame_size;
+ u32 device_id;
+ u16 phyType;
+};
+
+#endif /* _QLA3XXX_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/Makefile b/kernel/drivers/net/ethernet/qlogic/qlcnic/Makefile
new file mode 100644
index 000000000..3c2c2c7c1
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
+ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
+ qlcnic_sriov_common.o
+
+qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
new file mode 100644
index 000000000..f221126a5
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -0,0 +1,2404 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 62
+#define QLCNIC_LINUX_VERSIONID "5.3.62"
+#define QLCNIC_DRV_IDC_VER 0x01
+#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
+ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
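+/* Example: an image version word of 0x00020403 decodes to 3.4, build 2. */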
+
+#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring)	\
+	(sizeof(struct qlcnic_rx_buffer) * (rds_ring)->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring)	\
+	(sizeof(struct qlcnic_cmd_buffer) * (tx_ring)->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring)	\
+	(sizeof(struct cmd_desc_type0) * (tx_ring)->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+#define QLCNIC_P3P_C0 0x58
+
+#define QLCNIC_IS_REVISION_P3P(REVISION)	((REVISION) >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3P_MAX_MTU (9600)
+#define P3P_MIN_MTU (68)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+#define QLCNIC_83XX_MINIMUM_VECTOR 3
+#define QLCNIC_82XX_MINIMUM_VECTOR 2
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
+
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
+/*
+ * Following are the states of the Phantom. The Phantom will set them and
+ * the host will read them to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
+#define MAX_RDS_RINGS 2
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
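+
+/*
+ * Sketch (editorial addition): get_next_index() assumes the ring
+ * length is a power of two, so the mask wraps the index back to zero
+ * past the last descriptor. A hypothetical producer advance:
+ */
+static inline u32 qlcnic_sketch_advance_producer(u32 producer, u32 num_desc)
+{
+ /* e.g. producer 1023 with num_desc 1024 wraps back to 0 */
+ return get_next_index(producer, num_desc);
+}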
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The tcp_hdr_offset and ip_hdr_offset fields were added; the driver needs
+ * to fill them only when doing LSO (packets larger than 1500 bytes).
+ */
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 encap_descr; /* 15:10 offset of outer L3 header,
+ * 9:6 number of 32bit words in outer L3 header,
+ * 5 offload outer L4 checksum,
+ * 4 offload outer L3 checksum,
+ * 3 Inner L4 type, TCP=0, UDP=1,
+ * 2 Inner L3 type, IPv4=0, IPv6=1,
+ * 1 Outer L3 type,IPv4=0, IPv6=1,
+ * 0 type of encapsulation, GRE=0, VXLAN=1
+ */
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ u8 outer_hdr_length; /* Encapsulation only */
+ u8 rsvd1;
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ u8 eth_addr[ETH_ALEN];
+ __le16 vlan_TCI; /* In case of encapsulation,
+ * this is for outer VLAN
+ */
+
+} __attribute__ ((aligned(64)));
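+
+/*
+ * Sketch (editorial addition): packing encap_descr per the bit layout
+ * documented above, for a hypothetical VXLAN frame with a 20-byte
+ * outer IPv4 header (5 32-bit words) at byte offset 14, a TCP/IPv4
+ * inner packet, and both outer checksums offloaded.
+ */
+static inline __le16 qlcnic_sketch_encap_descr(void)
+{
+ u16 val = 1; /* bit 0: VXLAN encapsulation */
+ /* bits 1-3 stay 0: outer IPv4, inner IPv4, inner TCP */
+ val |= 1 << 4; /* offload outer L3 checksum */
+ val |= 1 << 5; /* offload outer L4 checksum */
+ val |= 5 << 6; /* outer L3 header is 5 dwords */
+ val |= 14 << 10; /* outer L3 header starts at byte 14 */
+
+ return cpu_to_le16(val);
+}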
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+} __packed;
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/*Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc {
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
+};
+
+struct uni_data_desc {
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
+};
+
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FDT_LOCATION 0x3F0000
+#define QLCNIC_B0_FW_IMAGE_REGION 0x74
+#define QLCNIC_C0_FW_IMAGE_REGION 0x97
+#define QLCNIC_BOOTLD_REGION 0x72
+struct qlcnic_flt_header {
+ u16 version;
+ u16 len;
+ u16 checksum;
+ u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+ u8 region;
+ u8 reserved0;
+ u8 attrib;
+ u8 reserved1;
+ u32 size;
+ u32 start_addr;
+ u32 end_addr;
+};
+
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+ u32 valid;
+ u16 ver;
+ u16 len;
+ u16 cksum;
+ u16 unused;
+ u8 model[16];
+ u8 mfg_id;
+ u16 id;
+ u8 flag;
+ u8 erase_cmd;
+ u8 alt_erase_cmd;
+ u8 write_enable_cmd;
+ u8 write_enable_bits;
+ u8 write_statusreg_cmd;
+ u8 unprotected_sec_cmd;
+ u8 read_manuf_cmd;
+ u32 block_size;
+ u32 alt_block_size;
+ u32 flash_size;
+ u32 write_enable_data;
+ u8 readid_addr_len;
+ u8 write_disable_bits;
+ u8 read_dev_id_len;
+ u8 chip_erase_cmd;
+ u16 read_timeo;
+ u8 protected_sec_cmd;
+ u8 resvd[65];
+};
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
+
+#define QLCNIC_MSIX_TABLE_OFFSET 0x44
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} contains the mapping info for each entry of an SG
+ * list. It has to be freed when DMA is complete. This is part of
+ * qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet. These will be
+ * used to save the dma info for pci_unmap_page().
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct qlcnic_rx_buffer {
+ u16 ref_handle;
+ struct sk_buff *skb;
+ struct list_head list;
+ u64 dma;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU
+ * and are adjusted based on the configured MTU.
+ */
+#define QLCNIC_INTR_COAL_TYPE_RX 1
+#define QLCNIC_INTR_COAL_TYPE_TX 2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX 3
+
+#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US 64
+#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS 64
+
+#define QLCNIC_INTR_DEFAULT 0x04
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+#define QLCNIC_DEV_INFO_SIZE 2
+
+struct qlcnic_nic_intr_coalesce {
+ u8 type;
+ u8 sts_ring_mask;
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
+ u16 flag;
+ u32 timer_out;
+};
+
+struct qlcnic_83xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 ocm_wnd_reg[16];
+ u32 rsvd[0];
+};
+
+struct qlcnic_82xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[7];
+ u32 capabilities;
+ u32 rsvd1[0];
+};
+
+#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
+
+struct qlcnic_fw_dump {
+ u8 clr; /* flag to indicate if dump is cleared */
+ bool enable; /* enable/disable dump */
+ u32 size; /* total size of the dump */
+ u32 cap_mask; /* Current capture mask */
+ void *data; /* dump data area */
+ void *tmpl_hdr;
+ dma_addr_t phys_addr;
+ void *dma_buffer;
+ bool use_pex_dma;
+ /* Read only elements which are common between 82xx and 83xx
+ * template header. Update these values immediately after we read
+ * template header from Firmware
+ */
+ u32 tmpl_hdr_size;
+ u32 version;
+ u32 num_entries;
+ u32 offset;
+};
+
+/*
+ * One qlcnic_hardware_context{} per adapter;
+ * contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u8 loopback_state;
+ u8 beacon_state;
+ u8 has_link_events;
+ u8 fw_type;
+ u8 physical_port;
+ u8 reset_context;
+ u8 msix_supported;
+ u8 max_mac_filters;
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 diag_test;
+ u8 num_msix;
+ u8 nic_mode;
+ int diag_cnt;
+
+ u16 max_uc_count;
+ u16 port_type;
+ u16 board_type;
+ u16 supported_type;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u16 op_mode;
+ u16 switch_mode;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 max_mtu;
+ u32 msg_enable;
+ u16 total_nic_func;
+ u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
+
+ u32 capabilities;
+ u32 extra_capability[3];
+ u32 temp;
+ u32 int_vec_bit;
+ u32 fw_hal_version;
+ u32 port_config;
+ struct qlcnic_hardware_ops *hw_ops;
+ struct qlcnic_nic_intr_coalesce coal;
+ struct qlcnic_fw_dump fw_dump;
+ struct qlcnic_fdt fdt;
+ struct qlc_83xx_reset reset;
+ struct qlc_83xx_idc idc;
+ struct qlc_83xx_fw_info *fw_info;
+ struct qlcnic_intrpt_config *intr_tbl;
+ struct qlcnic_sriov *sriov;
+ u32 *reg_tbl;
+ u32 *ext_reg_tbl;
+ u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+ u32 mbox_reg[4];
+ struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
+ u16 vxlan_port;
+ struct device *hwmon_dev;
+ u32 post_mode;
+ bool run_post;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+ u64 lrobytes;
+ u64 lso_frames;
+ u64 encap_lso_frames;
+ u64 encap_tx_csummed;
+ u64 encap_rx_csummed;
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 skb_alloc_failure;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
+ u64 spurious_intr;
+ u64 mac_filter_limit_overrun;
+};
+
+/*
+ * Rcv Descriptor ring context. One such ring per class of receive
+ * descriptors: one for normal packets, one for jumbo frames, and
+ * possibly others.
+ */
+struct qlcnic_host_rds_ring {
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ u32 num_desc;
+ u32 producer;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ void __iomem *crb_intr_mask;
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ + 12];
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_tx_queue_stats {
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+ u64 tx_bytes;
+};
+
+struct qlcnic_host_tx_ring {
+ int irq;
+ void __iomem *crb_intr_mask;
+ char name[IFNAMSIZ + 12];
+ u16 ctx_id;
+
+ u32 state;
+ u32 producer;
+ u32 sw_consumer;
+ u32 num_desc;
+
+ struct qlcnic_tx_queue_stats tx_stats;
+
+ void __iomem *crb_cmd_producer;
+ struct cmd_desc_type0 *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ __le32 *hw_consumer;
+
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+ struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
+} ____cacheline_internodealigned_in_smp;
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information relevant to receive
+ * processing must be in this structure. Global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
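+
+/*
+ * Sketch (editorial addition): how the CDRP command bit is consumed.
+ * A command word carries QLCNIC_CDRP_CMD_BIT; a value read back is a
+ * response once that bit is clear, and QLCNIC_CDRP_RSP_OK signals
+ * success.
+ */
+static inline bool qlcnic_sketch_cdrp_ok(u32 readback)
+{
+ return QLCNIC_CDRP_IS_RSP(readback) && readback == QLCNIC_CDRP_RSP_OK;
+}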
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_INVALID_ARGS 6
+#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_NOT_PERMITTED 10
+#define QLCNIC_RCODE_NOT_IMPL 15
+#define QLCNIC_RCODE_INVALID 16
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
+
+/*
+ * Context state
+ */
+#define QLCNIC_HOST_CTX_STATE_FREED 0
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+} __packed;
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 valid_field_offset;
+ u8 txrx_sds_binding;
+ u8 msix_handler;
+ u8 reserved[128]; /* reserve space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+} __packed;
+
+struct qlcnic_cardrsp_rds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+} __packed;
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrsp_sds_rings */
+ char data[0];
+} __packed;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
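+
+/*
+ * Sketch (editorial addition): sizing an Rx context request for a
+ * hypothetical configuration of 2 RDS rings and 4 SDS rings with the
+ * macro above.
+ */
+static inline size_t qlcnic_sketch_rx_req_size(void)
+{
+ return SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, 2, 4);
+}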
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} __packed;
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} __packed;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P3P 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+#define QLCNIC_MAC_VLAN_ADD 3
+#define QLCNIC_MAC_VLAN_DEL 4
+
+enum qlcnic_mac_type {
+ QLCNIC_UNICAST_MAC,
+ QLCNIC_MULTICAST_MAC,
+ QLCNIC_BROADCAST_MAC,
+};
+
+struct qlcnic_mac_vlan_list {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
+ enum qlcnic_mac_type mac_type;
+};
+
+/* MAC Learn */
+#define NO_MAC_LEARN 0
+#define DRV_MAC_LEARN 1
+#define FDB_MAC_LEARN 2
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+#define QLCNIC_ILB_MODE 0x1
+#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
+
+#define QLCNIC_LINKEVENT 0x1
+#define QLCNIC_LB_RESPONSE 0x2
+#define QLCNIC_IS_LB_CONFIGURED(VAL) \
+ (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
+#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
+
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13
+
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_TSO BIT_1
+#define QLCNIC_FW_CAPABILITY_BDG BIT_8
+#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
+#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
+#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
+#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
+#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
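+
+/*
+ * Sketch (editorial addition): a hypothetical companion accessor for
+ * the error-code field of the response header laid out above,
+ * mirroring the opcode accessor macro.
+ */
+static inline u32 qlcnic_sketch_msg_error_code(u64 msg_hdr)
+{
+ return (msg_hdr >> 16) & 0xFFFF; /* bits 31:16 */
+}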
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+} __packed;
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+struct qlcnic_vlan_req {
+ __le16 vlan_id;
+ __le16 rsvd[3];
+} __packed;
+
+struct qlcnic_ipaddr {
+ __be32 ipv4;
+ __be32 ipv6[4];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x01
+#define QLCNIC_LRO_DISABLED 0x00
+#define QLCNIC_BRIDGE_ENABLED 0X10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_ESWITCH_ENABLED 0x40
+#define QLCNIC_ADAPTER_INITIALIZED 0x80
+#define QLCNIC_TAGGING_ENABLED 0x100
+#define QLCNIC_MACSPOOF 0x200
+#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
+#define QLCNIC_PROMISC_DISABLED 0x800
+#define QLCNIC_NEED_FLR 0x1000
+#define QLCNIC_FW_RESET_OWNER 0x2000
+#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP 0x8000
+#define QLCNIC_TX_INTR_SHARED 0x10000
+#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+#define QLCNIC_TSS_RSS 0x80000
+
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLCNIC_ADD_VXLAN_PORT 0x100000
+#define QLCNIC_DEL_VXLAN_PORT 0x200000
+#endif
+
+#define QLCNIC_VLAN_FILTERING 0x800000
+
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter) \
+ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
+#define QLCNIC_BEACON_EANBLE 0xC
+#define QLCNIC_BEACON_DISABLE 0xD
+
+#define QLCNIC_BEACON_ON 2
+#define QLCNIC_BEACON_OFF 0
+
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+#define QLCNIC_MSIX_TBL_PGSIZE 4096
+
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+#define __QLCNIC_AER 5
+#define __QLCNIC_DIAG_RES_ALLOC 6
+#define __QLCNIC_LED_ENABLE 7
+#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
+#define __QLCNIC_SRIOV_ENABLE 10
+#define __QLCNIC_SRIOV_CAPABLE 11
+#define __QLCNIC_MBX_POLL_ENABLE 12
+#define __QLCNIC_DIAG_MODE 13
+#define __QLCNIC_MAINTENANCE_MODE 16
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_LED_TEST 3
+
+#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
+#define QLCNIC_LB_MAX_FILTERS 64
+#define QLCNIC_LB_BUCKET_SIZE 32
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+
+struct qlcnic_filter {
+ struct hlist_node fnode;
+ u8 faddr[ETH_ALEN];
+ u16 vlan_id;
+ unsigned long ftime;
+};
+
+struct qlcnic_filter_hash {
+ struct hlist_head *fhead;
+ u8 fnum;
+ u16 fmax;
+ u16 fbucket_size;
+};
+
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+ struct workqueue_struct *work_q;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_mbx_ops *ops;
+ struct work_struct work;
+ struct completion completion;
+ struct list_head cmd_q;
+ unsigned long status;
+ spinlock_t queue_lock; /* Mailbox queue lock */
+ spinlock_t aen_lock; /* Mailbox response/AEN lock */
+ atomic_t rsp_status;
+ u32 num_cmds;
+};
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ unsigned long state;
+ u32 flags;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
+
+ u8 max_rds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
+ u8 drv_tss_rings; /* tss ring input */
+ u8 drv_rss_rings; /* rss ring input */
+
+ u8 rx_csum;
+ u8 portnum;
+
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+ u8 reset_ctx_cnt;
+
+ u16 is_up;
+ u16 rx_pvid;
+ u16 tx_pvid;
+
+ u32 irq;
+ u32 heartbeat;
+
+ u8 dev_state;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
+
+ u8 mac_addr[ETH_ALEN];
+
+ u64 dev_rst_time;
+ bool drv_mac_learn;
+ bool fdb_mac_learn;
+ bool rx_mac_learn;
+ unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 flash_mfg_id;
+ struct qlcnic_npar_info *npars;
+ struct qlcnic_eswitch *eswitch;
+ struct qlcnic_nic_template *nic_ops;
+
+ struct qlcnic_adapter_stats stats;
+ struct list_head mac_list;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry *msix_entries;
+ struct workqueue_struct *qlcnic_wq;
+ struct delayed_work fw_work;
+ struct delayed_work idc_aen_work;
+ struct delayed_work mbx_poll_work;
+ struct qlcnic_dcb *dcb;
+
+ struct qlcnic_filter_hash fhash;
+ struct qlcnic_filter_hash rx_fhash;
+ struct list_head vf_mc_list;
+
+ spinlock_t mac_learn_lock;
+ /* spinlock for catching rcv filters for eswitch traffic */
+ spinlock_t rx_mac_learn_lock;
+ u32 file_prd_off; /* File fw product offset */
+ u32 fw_version;
+ u32 offload_flags;
+ const struct firmware *fw;
+};
+
+struct qlcnic_info_le {
+ __le16 pci_func;
+ __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+ __le16 phys_port;
+ __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+
+ __le32 capabilities;
+ u8 max_mac_filters;
+ u8 reserved1;
+ __le16 max_mtu;
+
+ __le16 max_tx_ques;
+ __le16 max_rx_ques;
+ __le16 min_tx_bw;
+ __le16 max_tx_bw;
+ __le32 op_type;
+ __le16 max_bw_reg_offset;
+ __le16 max_linkspeed_reg_offset;
+ __le32 capability1;
+ __le32 capability2;
+ __le32 capability3;
+ __le16 max_tx_mac_filters;
+ __le16 max_rx_mcast_mac_filters;
+ __le16 max_rx_ucast_mac_filters;
+ __le16 max_rx_ip_addr;
+ __le16 max_rx_lro_flow;
+ __le16 max_rx_status_rings;
+ __le16 max_rx_buf_rings;
+ __le16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ __le16 max_vports;
+ __le16 linkstate_reg_offset;
+ __le16 bit_offsets;
+ __le16 max_local_ipv6_addrs;
+ __le16 max_remote_ipv6_addrs;
+ u8 reserved2[56];
+} __packed;
+
+struct qlcnic_info {
+ u16 pci_func;
+ u16 op_mode;
+ u16 phys_port;
+ u16 switch_mode;
+ u32 capabilities;
+ u8 max_mac_filters;
+ u16 max_mtu;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 min_tx_bw;
+ u16 max_tx_bw;
+ u32 op_type;
+ u16 max_bw_reg_offset;
+ u16 max_linkspeed_reg_offset;
+ u32 capability1;
+ u32 capability2;
+ u32 capability3;
+ u16 max_tx_mac_filters;
+ u16 max_rx_mcast_mac_filters;
+ u16 max_rx_ucast_mac_filters;
+ u16 max_rx_ip_addr;
+ u16 max_rx_lro_flow;
+ u16 max_rx_status_rings;
+ u16 max_rx_buf_rings;
+ u16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ u16 max_vports;
+ u16 linkstate_reg_offset;
+ u16 bit_offsets;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_pci_info_le {
+ __le16 id; /* pci function id */
+ __le16 active; /* 1 = Enabled */
+ __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+ __le16 default_port; /* default port number */
+
+ __le16 tx_min_bw; /* Multiple of 100 Mbps */
+ __le16 tx_max_bw;
+ __le16 reserved1[2];
+
+ u8 mac[ETH_ALEN];
+ __le16 func_count;
+ u8 reserved2[104];
+
+} __packed;
+
+struct qlcnic_pci_info {
+ u16 id;
+ u16 active;
+ u16 type;
+ u16 default_port;
+ u16 tx_min_bw;
+ u16 tx_max_bw;
+ u8 mac[ETH_ALEN];
+ u16 func_count;
+};
+
+struct qlcnic_npar_info {
+ bool eswitch_status;
+ u16 pvid;
+ u16 min_bw;
+ u16 max_bw;
+ u8 phy_port;
+ u8 type;
+ u8 active;
+ u8 enable_pm;
+ u8 dest_npar;
+ u8 discard_tagged;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 promisc_mode;
+ u8 offload_flags;
+ u8 pci_func;
+ u8 mac[ETH_ALEN];
+};
+
+struct qlcnic_eswitch {
+ u8 port;
+ u8 active_vports;
+ u8 active_vlans;
+ u8 active_ucast_filters;
+ u8 max_ucast_filters;
+ u8 max_active_vlans;
+
+ u32 flags;
+#define QLCNIC_SWITCH_ENABLE BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
+};
+
+
+/* Return codes for Error handling */
+#define QL_STATUS_INVALID_PARAM -1
+
+#define MAX_BW 100 /* % of link speed */
+#define MIN_BW 1 /* % of link speed */
+#define MAX_VLAN_ID 4095
+#define MIN_VLAN_ID 2
+#define DEFAULT_MAC_LEARN 1
+
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
+#define IS_VALID_BW(bw) (bw <= MAX_BW)
+
+struct qlcnic_pci_func_cfg {
+ u16 func_type;
+ u16 min_bw;
+ u16 max_bw;
+ u16 port_num;
+ u8 pci_func;
+ u8 func_state;
+ u8 def_mac_addr[ETH_ALEN];
+};
+
+struct qlcnic_npar_func_cfg {
+ u32 fw_capab;
+ u16 port_num;
+ u16 min_bw;
+ u16 max_bw;
+ u16 max_tx_queues;
+ u16 max_rx_queues;
+ u8 pci_func;
+ u8 op_mode;
+};
+
+struct qlcnic_pm_func_cfg {
+ u8 pci_func;
+ u8 action;
+ u8 dest_npar;
+ u8 reserved[5];
+};
+
+struct qlcnic_esw_func_cfg {
+ u16 vlan_id;
+ u8 op_mode;
+ u8 op_type;
+ u8 pci_func;
+ u8 host_vlan_tag;
+ u8 promisc_mode;
+ u8 discard_tagged;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 offload_flags;
+ u8 reserved[5];
+};
+
+#define QLCNIC_STATS_VERSION 1
+#define QLCNIC_STATS_PORT 1
+#define QLCNIC_STATS_ESWITCH 2
+#define QLCNIC_QUERY_RX_COUNTER 0
+#define QLCNIC_QUERY_TX_COUNTER 1
+#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL
+#define QLCNIC_FILL_STATS(VAL1) \
+ (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
+#define QLCNIC_MAC_STATS 1
+#define QLCNIC_ESW_STATS 2
+
+#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
+do { \
+ if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
+ (VAL1) = (VAL2); \
+ else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
+ (VAL1) += (VAL2); \
+} while (0)
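+
+/*
+ * Sketch (editorial addition): aggregating eswitch counters with the
+ * macro above. QLCNIC_STATS_NOT_AVAIL acts as a sentinel, so
+ * unavailable samples neither seed nor pollute the running total.
+ */
+static inline void qlcnic_sketch_accumulate(u64 *total, u64 sample)
+{
+ QLCNIC_ADD_ESW_STATS(*total, sample);
+}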
+
+struct qlcnic_mac_statistics_le {
+ __le64 mac_tx_frames;
+ __le64 mac_tx_bytes;
+ __le64 mac_tx_mcast_pkts;
+ __le64 mac_tx_bcast_pkts;
+ __le64 mac_tx_pause_cnt;
+ __le64 mac_tx_ctrl_pkt;
+ __le64 mac_tx_lt_64b_pkts;
+ __le64 mac_tx_lt_127b_pkts;
+ __le64 mac_tx_lt_255b_pkts;
+ __le64 mac_tx_lt_511b_pkts;
+ __le64 mac_tx_lt_1023b_pkts;
+ __le64 mac_tx_lt_1518b_pkts;
+ __le64 mac_tx_gt_1518b_pkts;
+ __le64 rsvd1[3];
+
+ __le64 mac_rx_frames;
+ __le64 mac_rx_bytes;
+ __le64 mac_rx_mcast_pkts;
+ __le64 mac_rx_bcast_pkts;
+ __le64 mac_rx_pause_cnt;
+ __le64 mac_rx_ctrl_pkt;
+ __le64 mac_rx_lt_64b_pkts;
+ __le64 mac_rx_lt_127b_pkts;
+ __le64 mac_rx_lt_255b_pkts;
+ __le64 mac_rx_lt_511b_pkts;
+ __le64 mac_rx_lt_1023b_pkts;
+ __le64 mac_rx_lt_1518b_pkts;
+ __le64 mac_rx_gt_1518b_pkts;
+ __le64 rsvd2[3];
+
+ __le64 mac_rx_length_error;
+ __le64 mac_rx_length_small;
+ __le64 mac_rx_length_large;
+ __le64 mac_rx_jabber;
+ __le64 mac_rx_dropped;
+ __le64 mac_rx_crc_error;
+ __le64 mac_align_error;
+} __packed;
+
+struct qlcnic_mac_statistics {
+ u64 mac_tx_frames;
+ u64 mac_tx_bytes;
+ u64 mac_tx_mcast_pkts;
+ u64 mac_tx_bcast_pkts;
+ u64 mac_tx_pause_cnt;
+ u64 mac_tx_ctrl_pkt;
+ u64 mac_tx_lt_64b_pkts;
+ u64 mac_tx_lt_127b_pkts;
+ u64 mac_tx_lt_255b_pkts;
+ u64 mac_tx_lt_511b_pkts;
+ u64 mac_tx_lt_1023b_pkts;
+ u64 mac_tx_lt_1518b_pkts;
+ u64 mac_tx_gt_1518b_pkts;
+ u64 rsvd1[3];
+ u64 mac_rx_frames;
+ u64 mac_rx_bytes;
+ u64 mac_rx_mcast_pkts;
+ u64 mac_rx_bcast_pkts;
+ u64 mac_rx_pause_cnt;
+ u64 mac_rx_ctrl_pkt;
+ u64 mac_rx_lt_64b_pkts;
+ u64 mac_rx_lt_127b_pkts;
+ u64 mac_rx_lt_255b_pkts;
+ u64 mac_rx_lt_511b_pkts;
+ u64 mac_rx_lt_1023b_pkts;
+ u64 mac_rx_lt_1518b_pkts;
+ u64 mac_rx_gt_1518b_pkts;
+ u64 rsvd2[3];
+ u64 mac_rx_length_error;
+ u64 mac_rx_length_small;
+ u64 mac_rx_length_large;
+ u64 mac_rx_jabber;
+ u64 mac_rx_dropped;
+ u64 mac_rx_crc_error;
+ u64 mac_align_error;
+};
+
+struct qlcnic_esw_stats_le {
+ __le16 context_id;
+ __le16 version;
+ __le16 size;
+ __le16 unused;
+ __le64 unicast_frames;
+ __le64 multicast_frames;
+ __le64 broadcast_frames;
+ __le64 dropped_frames;
+ __le64 errors;
+ __le64 local_frames;
+ __le64 numbytes;
+ __le64 rsvd[3];
+} __packed;
+
+struct __qlcnic_esw_statistics {
+ u16 context_id;
+ u16 version;
+ u16 size;
+ u16 unused;
+ u64 unicast_frames;
+ u64 multicast_frames;
+ u64 broadcast_frames;
+ u64 dropped_frames;
+ u64 errors;
+ u64 local_frames;
+ u64 numbytes;
+ u64 rsvd[3];
+};
+
+struct qlcnic_esw_statistics {
+ struct __qlcnic_esw_statistics rx;
+ struct __qlcnic_esw_statistics tx;
+};
+
+#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
+#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
+#define QLCNIC_FORCE_FW_RESET 0xdeaddead
+#define QLCNIC_SET_QUIESCENT 0xadd00010
+#define QLCNIC_RESET_QUIESCENT 0xadd00020
+
+struct _cdrp_cmd {
+ u32 num;
+ u32 *arg;
+};
+
+struct qlcnic_cmd_args {
+ struct completion completion;
+ struct list_head list;
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+ atomic_t rsp_status;
+ int pay_size;
+ u32 rsp_opcode;
+ u32 total_cmds;
+ u32 op_type;
+ u32 type;
+ u32 cmd_op;
+ u32 *hdr; /* Back channel message header */
+ u32 *pay; /* Back channel message payload */
+ u8 func_num;
+};
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
+int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define QLCRD32(adapter, off, err) \
+ (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
+
+#define QLCWR32(adapter, off, val) \
+ adapter->ahw->hw_ops->write_reg(adapter, off, val)
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
+
+#define __QLCNIC_MAX_LED_RATE 0xf
+#define __QLCNIC_MAX_LED_STATE 0x2
+
+#define MAX_CTL_CHECK 1000
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
+int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
+
+/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
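+
+/*
+ * Sketch (editorial addition): the PCIe semaphore wrappers above pair
+ * each lock with its unlock around the guarded hardware access; e.g.
+ * a flash read takes the ROM semaphore for its duration.
+ */
+static inline int qlcnic_sketch_locked_rom_read(struct qlcnic_adapter *adapter,
+ u32 addr, u32 *val)
+{
+ int err = qlcnic_rom_lock(adapter);
+
+ if (err)
+ return err;
+ err = qlcnic_rom_fast_read(adapter, addr, val);
+ qlcnic_rom_unlock(adapter);
+ return err;
+}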
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+
+int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16,
+ enum qlcnic_mac_type);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
+void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
+
+/* Functions from qlcnic_ethtool.c */
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
+void qlcnic_set_drv_version(struct qlcnic_adapter *);
+
+/* eSwitch management functions */
+int qlcnic_config_switch_port(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+
+int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
+int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
+
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
+void qlcnic_free_tx_rings(struct qlcnic_adapter *);
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_set_vlan_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+int qlcnic_read_mac_addr(struct qlcnic_adapter *);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_board_info {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (likely(tx_ring->producer < tx_ring->sw_consumer))
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
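+
+/*
+ * Sketch (editorial addition): a typical transmit-path use of
+ * qlcnic_tx_avail(), stopping the queue once free descriptors fall
+ * below the stop threshold defined earlier in this header.
+ */
+static inline bool qlcnic_sketch_tx_should_stop(struct qlcnic_host_tx_ring *tx_ring)
+{
+ return qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH;
+}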
+
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+ int (*init_driver) (struct qlcnic_adapter *);
+ void (*request_reset) (struct qlcnic_adapter *, u32);
+ void (*cancel_idc_work) (struct qlcnic_adapter *);
+ int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+ void (*napi_del)(struct qlcnic_adapter *);
+ void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+ irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+ int (*shutdown)(struct pci_dev *);
+ int (*resume)(struct qlcnic_adapter *);
+};
+
+struct qlcnic_mbx_ops {
+ int (*enqueue_cmd) (struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *, unsigned long *);
+ void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+ void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
+ int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+ void (*get_ocm_win) (struct qlcnic_hardware_context *);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *);
+ int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+ int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*get_func_no) (struct qlcnic_adapter *);
+ int (*api_lock) (struct qlcnic_adapter *);
+ void (*api_unlock) (struct qlcnic_adapter *);
+ void (*add_sysfs) (struct qlcnic_adapter *);
+ void (*remove_sysfs) (struct qlcnic_adapter *);
+ void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+ int (*create_rx_ctx) (struct qlcnic_adapter *);
+ int (*create_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+ void (*del_rx_ctx) (struct qlcnic_adapter *);
+ void (*del_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ int (*setup_link_event) (struct qlcnic_adapter *, int);
+ int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+ int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
+ void (*napi_enable) (struct qlcnic_adapter *);
+ void (*napi_disable) (struct qlcnic_adapter *);
+ int (*config_intr_coal) (struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+ int (*config_rss) (struct qlcnic_adapter *, int);
+ int (*config_hw_lro) (struct qlcnic_adapter *, int);
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+ void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ int (*get_board_info) (struct qlcnic_adapter *);
+ void (*set_mac_filter_count) (struct qlcnic_adapter *);
+ void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
+ pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+ pci_channel_state_t);
+ pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+ void (*io_resume) (struct pci_dev *);
+ void (*get_beacon_state)(struct qlcnic_adapter *);
+ void (*enable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*disable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*enable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ void (*disable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ u32 (*get_saved_state)(void *, u32);
+ void (*set_saved_state)(void *, u32, u32);
+ void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+ u32 (*get_cap_size)(void *, int);
+ void (*set_sys_info)(void *, int, u32);
+ void (*store_cap_mask)(void *, u32);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off, u32 data)
+{
+ return adapter->ahw->hw_ops->write_reg(adapter, off, data);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+ u8 *mac, u8 function)
+{
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->setup_intr(adapter);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 arg)
+{
+ return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (adapter->ahw->hw_ops->mbx_cmd)
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+
+ return -EIO;
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->add_sysfs)
+ adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->remove_sysfs)
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr,
+ int ring)
+{
+ return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->del_rx_ctx(adapter);
+}
+
+static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr)
+{
+ return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u8 id)
+{
+ return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *info)
+{
+ return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+ u8 *addr, u16 id, u8 cmd)
+{
+ return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ return adapter->nic_ops->shutdown(pdev);
+}
+
+static inline int __qlcnic_resume(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->resume(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->clear_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ u32 mode)
+{
+ return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 *addr, u16 id)
+{
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->free_mac_list(adapter);
+}
+
+static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->set_mac_filter_count)
+ adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+}
+
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index)
+{
+ return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index, u32 value)
+{
+ adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_dump *fw_dump)
+{
+ adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int index)
+{
+ return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int idx, u32 value)
+{
+ adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, u32 mask)
+{
+ adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ if (adapter->nic_ops->request_reset)
+ adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ if (adapter->nic_ops->cancel_idc_work)
+ adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 rate)
+{
+ return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x1
+ * to the source register (instead of 0x0) to disable receiving
+ * interrupts.
+ */
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->enable_sds_intr)
+ adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->disable_sds_intr)
+ adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->enable_tx_intr)
+ adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->disable_tx_intr)
+ adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x0
+ * to the source register (instead of 0x1) to enable receiving
+ * interrupts.
+ */
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
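+
+/* Summary of the 82xx SDS interrupt mask writes above:
+ *
+ *                               enable    disable
+ *  multi Tx, no diag, MSI-X      0x0       0x1
+ *  all other modes               0x1       0x0
+ *
+ * In pure INTx mode the enable path additionally writes 0xfbff to the
+ * target mask register.
+ */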
+
+static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter)
+{
+ return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter)
+{
+ clear_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
+
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
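+
+/* Usage sketch (illustrative, not from the original source): QLCDB()
+ * prints only when the matching NETIF_MSG_* bit is set in msg_enable,
+ * e.g.
+ *
+ *	QLCDB(adapter, DRV, "ring %d reset\n", ring);
+ *
+ * emits nothing unless NETIF_MSG_DRV is enabled for this device.
+ */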
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
+
+static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+	return device == PCI_DEVICE_ID_QLOGIC_QLE824X;
+}
+
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+	return (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+	       (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X);
+}
+
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+	return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+	       (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
+	       (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+	       (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+	       (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X);
+}
+
+static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
+{
+	return adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC;
+}
+
+static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+	       (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X);
+}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+	return device == PCI_DEVICE_ID_QLOGIC_QLE834X;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+	return device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X;
+}
+
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+	return qlcnic_sriov_pf_check(adapter) ||
+	       qlcnic_sriov_vf_check(adapter);
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
+
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+ u32 *tmp = buffer;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ *tmp = swab32(*tmp);
+ tmp++;
+ }
+#endif
+}
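+
+/* Illustrative call (buffer/length names are hypothetical): a dump
+ * buffer captured as little-endian u32 words can be fixed up in place
+ * on big-endian hosts with
+ *
+ *	qlcnic_swap32_buffer(buf, len / sizeof(u32));
+ *
+ * On little-endian builds the function compiles to an empty body.
+ */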
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
+#endif /* __QLCNIC_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644
index 000000000..840bf36b5
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -0,0 +1,4165 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
+
+/* 83xx Module type */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */
+#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP	0x4 /* 10GE passive
+						     * copper (compliant)
+						     */
+#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP	0x5 /* 10GE active limiting
+						     * copper (compliant)
+						     */
+#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */
+#define QLC_83XX_MODULE_TP_1000BASE_T		0xa /* 1000Base-T */
+#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */
+
+/* Port types */
+#define QLC_83XX_10_CAPABLE BIT_8
+#define QLC_83XX_100_CAPABLE BIT_9
+#define QLC_83XX_1G_CAPABLE BIT_10
+#define QLC_83XX_10G_CAPABLE BIT_11
+#define QLC_83XX_AUTONEG_ENABLE BIT_15
+
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+ {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+ {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+ {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+ {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+ {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+ {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+ {QLCNIC_CMD_IDC_ACK, 5, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
+ {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+ {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+ {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1},
+ {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+ {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
+ {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+ {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+};
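+
+/* Each entry above is {opcode, in_args, out_args}; for example
+ * {QLCNIC_CMD_SET_MTU, 3, 1} describes a request of 3 mailbox words
+ * and a firmware response of 1 word. The table is consulted by
+ * qlcnic_83xx_alloc_mbx_args() when sizing a command's buffers.
+ */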
+
+const u32 qlcnic_83xx_ext_reg_tbl[] = {
+ 0x38CC, /* Global Reset */
+ 0x38F0, /* Wildcard */
+ 0x38FC, /* Informant */
+ 0x3038, /* Host MBX ctrl */
+ 0x303C, /* FW MBX ctrl */
+ 0x355C, /* BOOT LOADER ADDRESS REG */
+ 0x3560, /* BOOT LOADER SIZE REG */
+ 0x3564, /* FW IMAGE ADDR REG */
+ 0x1000, /* MBX intr enable */
+ 0x1200, /* Default Intr mask */
+ 0x1204, /* Default Interrupt ID */
+ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */
+ 0x3784, /* QLC_83XX_IDC_DEV_STATE */
+ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */
+ 0x378C, /* QLC_83XX_IDC_DRV_ACK */
+ 0x3790, /* QLC_83XX_IDC_CTRL */
+ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */
+ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */
+ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */
+ 0x37A0, /* QLC_83XX_IDC_PF_0 */
+ 0x37A4, /* QLC_83XX_IDC_PF_1 */
+ 0x37A8, /* QLC_83XX_IDC_PF_2 */
+ 0x37AC, /* QLC_83XX_IDC_PF_3 */
+ 0x37B0, /* QLC_83XX_IDC_PF_4 */
+ 0x37B4, /* QLC_83XX_IDC_PF_5 */
+ 0x37B8, /* QLC_83XX_IDC_PF_6 */
+ 0x37BC, /* QLC_83XX_IDC_PF_7 */
+ 0x37C0, /* QLC_83XX_IDC_PF_8 */
+ 0x37C4, /* QLC_83XX_IDC_PF_9 */
+ 0x37C8, /* QLC_83XX_IDC_PF_10 */
+ 0x37CC, /* QLC_83XX_IDC_PF_11 */
+ 0x37D0, /* QLC_83XX_IDC_PF_12 */
+ 0x37D4, /* QLC_83XX_IDC_PF_13 */
+ 0x37D8, /* QLC_83XX_IDC_PF_14 */
+ 0x37DC, /* QLC_83XX_IDC_PF_15 */
+ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+ 0x37F0, /* QLC_83XX_DRV_OP_MODE */
+ 0x37F4, /* QLC_83XX_VNIC_STATE */
+ 0x3868, /* QLC_83XX_DRV_LOCK */
+ 0x386C, /* QLC_83XX_DRV_UNLOCK */
+ 0x3504, /* QLC_83XX_DRV_LOCK_ID */
+ 0x34A4, /* QLC_83XX_ASIC_TEMP */
+};
+
+const u32 qlcnic_83xx_reg_tbl[] = {
+ 0x34A8, /* PEG_HALT_STAT1 */
+ 0x34AC, /* PEG_HALT_STAT2 */
+ 0x34B0, /* FW_HEARTBEAT */
+ 0x3500, /* FLASH LOCK_ID */
+ 0x3528, /* FW_CAPABILITIES */
+ 0x3538, /* Driver active, DRV_REG0 */
+ 0x3540, /* Device state, DRV_REG1 */
+ 0x3544, /* Driver state, DRV_REG2 */
+ 0x3548, /* Driver scratch, DRV_REG3 */
+	0x354C,		/* Device partition info, DRV_REG4 */
+ 0x3524, /* Driver IDC ver, DRV_REG5 */
+ 0x3550, /* FW_VER_MAJOR */
+ 0x3554, /* FW_VER_MINOR */
+ 0x3558, /* FW_VER_SUB */
+ 0x359C, /* NPAR STATE */
+ 0x35FC, /* FW_IMG_VALID */
+ 0x3650, /* CMD_PEG_STATE */
+ 0x373C, /* RCV_PEG_STATE */
+ 0x37B4, /* ASIC TEMP */
+ 0x356C, /* FW API */
+ 0x3570, /* DRV OP MODE */
+ 0x3850, /* FLASH LOCK */
+ 0x3854, /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .add_sysfs = qlcnic_83xx_add_sysfs,
+ .remove_sysfs = qlcnic_83xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
+ .io_error_detected = qlcnic_83xx_io_error_detected,
+ .io_slot_reset = qlcnic_83xx_io_slot_reset,
+ .io_resume = qlcnic_83xx_io_resume,
+ .get_beacon_state = qlcnic_83xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
+ .get_saved_state = qlcnic_83xx_get_saved_state,
+ .set_saved_state = qlcnic_83xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_83xx_get_cap_size,
+ .set_sys_info = qlcnic_83xx_set_sys_info,
+ .store_cap_mask = qlcnic_83xx_store_cap_mask,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .request_reset = qlcnic_83xx_idc_request_reset,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+ .shutdown = qlcnic_83xx_shutdown,
+ .resume = qlcnic_83xx_resume,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_83xx_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ struct pci_dev *pdev = adapter->pdev;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ return adapter->fw_version;
+}
+
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+ void __iomem *base;
+ u32 val;
+
+ base = adapter->ahw->pci_base0 +
+ QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+ writel(addr, base);
+ val = readl(base);
+ if (val != addr)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ int *err)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ *err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!*err) {
+ return QLCRDX(ahw, QLCNIC_WILDCARD);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%lx\n", __func__, addr);
+ return -EIO;
+ }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ u32 data)
+{
+ int err;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!err) {
+ QLCWRX(ahw, QLCNIC_WILDCARD, data);
+ return 0;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x data = 0x%x\n",
+ __func__, (int)addr, data);
+ return err;
+ }
+}
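+
+/* Usage sketch (illustrative, not from the original source): indirect
+ * access programs the per-function CRB window with the target address,
+ * then moves data through the WILDCARD register, e.g.
+ *
+ *	int err;
+ *	u32 val = qlcnic_83xx_rd_reg_indirect(adapter, addr, &err);
+ *
+ *	if (!err)
+ *		qlcnic_83xx_wrt_reg_indirect(adapter, addr, val);
+ */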
+
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ /* account for AEN interrupt MSI-X based interrupts */
+ num_msix += 1;
+
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ num_msix += adapter->drv_tx_rings;
+
+ return num_msix;
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i, num_msix;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = ahw->num_msix;
+ } else {
+ num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ num_msix = ahw->num_msix;
+ } else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+ num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ }
+ }
+
+ /* setup interrupt mapping table for fw */
+ ahw->intr_tbl = vzalloc(num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
+ qlcnic_83xx_enable_legacy(adapter);
+ }
+
+ for (i = 0; i < num_msix; i++) {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ else
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ writel(0, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ if (adapter->tgt_mask_reg)
+ writel(1, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ *adapter)
+{
+ u32 mask;
+
+	/* The mailbox (in MSI-X mode) and the legacy interrupt share the
+	 * same source register. We can get here before any context is
+	 * created, while sds_ring->crb_intr_mask is still uninitialized,
+	 * so calculate the BAR offset of the interrupt source register
+	 * directly.
+	 */
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 mask;
+
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(1, adapter->ahw->pci_base0 + mask);
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
+ for (i = 0; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 intr_val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int retries = 0;
+
+ intr_val = readl(adapter->tgt_status_reg);
+
+ if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+ return IRQ_NONE;
+
+ if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+ adapter->stats.spurious_intr++;
+ return IRQ_NONE;
+ }
+	/* The barrier is required to ensure that the register writes
+	 * have completed
+	 */
+ wmb();
+
+ /* clear the interrupt trigger control register */
+ writel(0, adapter->isr_int_vec);
+ intr_val = readl(adapter->isr_int_vec);
+ do {
+ intr_val = readl(adapter->tgt_status_reg);
+ if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+ break;
+ retries++;
+ } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+ (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+ return IRQ_HANDLED;
+}
+
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ complete(&mbx->completion);
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
+out:
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ qlcnic_83xx_poll_process_aen(adapter);
+
+ if (ahw->diag_test) {
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+ ahw->diag_cnt++;
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ return IRQ_HANDLED;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ } else {
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ napi_schedule(&sds_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+
+ if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+
+ return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 num_msix;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_set_legacy_intr_mask(adapter);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix - 1;
+ else
+ num_msix = 0;
+
+ msleep(20);
+
+ if (adapter->msix_entries) {
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+ }
+}
+
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ u32 val;
+ int err = 0;
+ unsigned long flags = 0;
+
+ if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ flags |= IRQF_SHARED;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ handler = qlcnic_83xx_handle_aen;
+ val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+ err = request_irq(val, handler, flags, "qlcnic-MB", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register MBX interrupt\n");
+ return err;
+ }
+ } else {
+ handler = qlcnic_83xx_intr;
+ val = adapter->msix_entries[0].vector;
+ err = request_irq(val, handler, flags, "qlcnic", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register INTx interrupt\n");
+ return err;
+ }
+ qlcnic_83xx_clear_legacy_intr_mask(adapter);
+ }
+
+ /* Enable mailbox interrupt */
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+ adapter->ahw->pci_func = (val >> 24) & 0xff;
+}
+
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val, limit = 0;
+
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+ do {
+ val = readl(addr);
+ if (val) {
+ /* write the function number to register */
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ ahw->pci_func);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+ return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+ val = readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int ret = 0;
+ u32 data;
+
+ if (qlcnic_api_lock(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire lock. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+
+ data = QLCRD32(adapter, (u32) offset, &ret);
+ qlcnic_api_unlock(adapter);
+
+ if (ret == -EIO) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+ memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+
+ memcpy(&data, buf, size);
+ qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get Port Info failed\n");
+	} else {
+ if (ahw->port_config & QLC_83XX_10G_CAPABLE) {
+ ahw->port_type = QLCNIC_XGBE;
+ } else if (ahw->port_config & QLC_83XX_10_CAPABLE ||
+ ahw->port_config & QLC_83XX_100_CAPABLE ||
+ ahw->port_config & QLC_83XX_1G_CAPABLE) {
+ ahw->port_type = QLCNIC_GBE;
+ } else {
+ ahw->port_type = QLCNIC_XGBE;
+ }
+
+ if (QLC_83XX_AUTONEG(ahw->port_config))
+ ahw->link_autoneg = AUTONEG_ENABLE;
+	}
+ return status;
+}
+
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 act_pci_fn = ahw->total_nic_func;
+ u16 count;
+
+ ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
+ if (act_pci_fn <= 2)
+ count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) /
+ act_pci_fn;
+ else
+ count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) /
+ act_pci_fn;
+ ahw->max_uc_count = count;
+}
+
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+ else
+ val = BIT_2;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->fw_hal_version = 2;
+ qlcnic_get_func_no(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ qlcnic_sriov_vf_set_ops(adapter);
+ return;
+ }
+
+ /* Determine function privilege level */
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ if (pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV))
+ set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
+ adapter->nic_ops = &qlcnic_83xx_ops;
+ }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
+ dev_info(&adapter->pdev->dev,
+ "Host MBX regs(%d)\n", cmd->req.num);
+ for (i = 0; i < cmd->req.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->req.arg[i]);
+ }
+ pr_info("\n");
+ dev_info(&adapter->pdev->dev,
+ "FW MBX regs(%d)\n", cmd->rsp.num);
+ for (i = 0; i < cmd->rsp.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->rsp.arg[i]);
+ }
+ pr_info("\n");
+}
+
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
+
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
+
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+}
+
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
+
+ if (!mbx)
+ return -EIO;
+
+ opcode = LSW(cmd->req.arg[0]);
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
+ }
+
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
+ }
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ }
+
+ return cmd->rsp_opcode;
+}
+
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ u32 temp;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
+ mbx_tbl = qlcnic_83xx_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_83XX_FW_MBX_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ temp = adapter->ahw->fw_hal_version << 29;
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
+ return 0;
+ }
+ }
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
+ return -EINVAL;
+}
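+
+/* Layout of the request header word built above (fw_hal_version is 2
+ * on 83xx, see qlcnic_83xx_check_vf()):
+ *
+ *	req.arg[0] = opcode | (req.num << 16) | (fw_hal_version << 29)
+ */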
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+ if (err)
+ return;
+
+ for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+ cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "%s: Mailbox IDC ACK failed.\n", __func__);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(data[0]));
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+}
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 event[QLC_83XX_MBX_AEN_CNT];
+ int i;
+
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+ switch (QLCNIC_MBX_RSP(event[0])) {
+ case QLCNIC_MBX_LINK_EVENT:
+ qlcnic_83xx_handle_link_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_COMP_EVENT:
+ qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_REQUEST_EVENT:
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+ queue_delayed_work(adapter->qlcnic_wq,
+ &adapter->idc_aen_work, 0);
+ break;
+ case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
+ break;
+ case QLCNIC_MBX_BC_EVENT:
+ qlcnic_sriov_handle_bc_event(adapter, event[1]);
+ break;
+ case QLCNIC_MBX_SFP_INSERT_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_SFP_REMOVE_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
+ break;
+ default:
+ dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ }
+
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
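+
+/* AEN handshake: firmware raises QLCNIC_SET_OWNER in its mailbox
+ * control register, the host drains the event words, and ownership is
+ * returned by the QLCNIC_CLR_OWNER write above.
+ */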
+
+static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (resp & QLCNIC_SET_OWNER) {
+ event = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
+ }
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+
+ adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
+
+ if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_83xx_process_aen(adapter);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
+ (HZ / 10));
+}
+
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
+}
+
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+ cancel_delayed_work_sync(&adapter->mbx_poll_work);
+}
+
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+ int index, i, err, sds_mbx_size;
+ u32 *buf, intrpt_id, intr_mask;
+ u16 context_id;
+ u8 num_sds;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_add_rings_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ context_id = recv_ctx->context_id;
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
+ ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
+ /* set up status rings, mbx 2-81 */
+ index = 2;
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+ sds_mbx.sds_ring_size = sds->num_desc;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to add rings %d\n", err);
+ goto out;
+ }
+
+ mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+ index = 0;
+ /* status descriptor ring */
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[index];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ index++;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = recv_ctx->context_id | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int i, err, index, sds_mbx_size, rds_mbx_size;
+ u8 num_sds, num_rds;
+ u32 *buf, intrpt_id, intr_mask, cap = 0;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_host_rds_ring *rds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_rds_mbx rds_mbx;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ num_rds = adapter->max_rds_rings;
+
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
+ else
+ num_sds = QLCNIC_MAX_SDS_RINGS;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+ cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+ /* set mailbox hdr and capabilities */
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CREATE_RX_CTX);
+ if (err)
+ return err;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ cmd.req.arg[1] = cap;
+ cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+ (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
+ &cmd.req.arg[6]);
+ /* set up status rings, mbx 8-57/87 */
+ index = QLC_83XX_HOST_SDS_MBX_IDX;
+ for (i = 0; i < num_sds; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+ sds_mbx.sds_ring_size = sds->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+ /* set up receive rings, mbx 88-111/135 */
+ index = QLCNIC_HOST_RDS_MBX_IDX;
+ rds = &recv_ctx->rds_rings[0];
+ rds->producer = 0;
+ memset(&rds_mbx, 0, rds_mbx_size);
+ rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
+ rds_mbx.reg_ring_sz = rds->dma_size;
+ rds_mbx.reg_ring_len = rds->num_desc;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->producer = 0;
+ rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
+ rds_mbx.jmb_ring_sz = rds->dma_size;
+ rds_mbx.jmb_ring_len = rds->num_desc;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &rds_mbx, rds_mbx_size);
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Rx ctx in firmware%d\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+ recv_ctx->context_id = mbx_out->ctx_id;
+ recv_ctx->state = mbx_out->state;
+ recv_ctx->virt_port = mbx_out->vport_id;
+ dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ /* Receive descriptor ring */
+ /* Standard ring */
+ rds = &recv_ctx->rds_rings[0];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].reg_buf;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].jmb_buf;
+ /* status descriptor ring */
+ for (i = 0; i < num_sds; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[i];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
+ err = qlcnic_83xx_add_rings(adapter);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 temp = 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = tx_ring->ctx_id | temp;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx, int ring)
+{
+ int err;
+ u16 msix_id;
+ u32 *buf, intr_mask, temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_tx_mbx mbx;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 msix_vector;
+
+ /* Reset host resources */
+ tx->producer = 0;
+ tx->sw_consumer = 0;
+ *(tx->hw_consumer) = 0;
+
+ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+	/* set up mailbox inbox registers */
+ mbx.phys_addr_low = LSD(tx->phys_addr);
+ mbx.phys_addr_high = MSD(tx->phys_addr);
+ mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+ mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
+ mbx.size = tx->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ msix_vector = adapter->drv_sds_rings + ring;
+ else
+ msix_vector = adapter->drv_sds_rings - 1;
+ msix_id = ahw->intr_tbl[msix_vector].id;
+ } else {
+ msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ }
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ mbx.intr_id = msix_id;
+ else
+ mbx.intr_id = 0xffff;
+ mbx.src = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ if (err)
+ return err;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
+ buf = &cmd.req.arg[6];
+ memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+ /* send the mailbox command*/
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+ tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+ tx->ctx_id = mbx_out->ctx_id;
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
+ tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+ netdev_info(adapter->netdev,
+ "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+ u8 num_sds_ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u16 adapter_state = adapter->is_up;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+ adapter->drv_sds_rings = num_sds_ring;
+ qlcnic_attach(adapter);
+ }
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ u8 drv_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ adapter->ahw->diag_test = 0;
+ adapter->drv_sds_rings = drv_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+out:
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ beacon_state = cmd.rsp.arg[4];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLC_83XX_BEACON_OFF;
+ else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+ ahw->beacon_state = QLC_83XX_BEACON_ON;
+ }
+ } else {
+ netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+ err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 beacon)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_in;
+ int i, status = 0;
+
+ if (state) {
+ /* Get LED configuration */
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_CONFIG);
+ if (status)
+ return status;
+
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get led config failed.\n");
+ goto mbx_err;
+ } else {
+ for (i = 0; i < 4; i++)
+ adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+ }
+ qlcnic_free_mbx_args(&cmd);
+ /* Set LED Configuration */
+ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+ LSW(QLC_83XX_LED_CONFIG);
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ if (status)
+ return status;
+
+ cmd.req.arg[1] = mbx_in;
+ cmd.req.arg[2] = mbx_in;
+ cmd.req.arg[3] = mbx_in;
+ if (beacon)
+ cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Set led config failed.\n");
+ }
+mbx_err:
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+
+ } else {
+ /* Restoring default LED configuration */
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ if (status)
+ return status;
+
+ cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+ cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+ cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+ if (beacon)
+ cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Restoring led config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+ }
+}
+
+int qlcnic_83xx_set_led(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EIO, active = 1;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "LED test is not supported in non-privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to set LED blink state\n");
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to reset LED blink state\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int status;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
+ if (enable)
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ else
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_STOP_NIC_FUNC);
+
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s in NIC IDC function event.\n",
+ (enable ? "register" : "unregister"));
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = adapter->ahw->port_config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+		dev_info(&adapter->pdev->dev, "Get Port Config failed\n");
+ else
+ adapter->ahw->port_config = cmd.rsp.arg[1];
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
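+/* Enable or disable asynchronous link event notifications (AENs) from the
+ * firmware for this function's receive context.
+ */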
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+			 "Setup link event mailbox failed\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ adapter->rx_mac_learn = true;
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_cmd_args *cmd = NULL;
+ u32 temp = 0;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+ if (err)
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+ qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
+
+ if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+ cmd->req.arg[1] = mode | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
+ return err;
+}
+
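+/* Run an internal (ILB) or external (ELB) loopback diagnostic: allocate
+ * diagnostic resources, switch the port into loopback, wait for the link
+ * up event, run traffic, then restore the original configuration.
+ */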
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ int ret = 0, loop = 0;
+
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+			    "Loopback test not supported in non-privileged mode\n");
+		return -EOPNOTSUPP;
+ }
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
+ if (qlcnic_get_diag_lock(adapter)) {
+ netdev_info(netdev, "Device is in diagnostics mode\n");
+ return -EBUSY;
+ }
+
+ netdev_info(netdev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+ drv_sds_rings);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+			netdev_info(netdev,
+				    "Device is resetting, freeing LB test resources\n");
+ ret = -EBUSY;
+ goto free_diag_res;
+ }
+ if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+			netdev_info(netdev,
+				    "Firmware did not send link up event for loopback request\n");
+ ret = -ETIMEDOUT;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_alloc:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ qlcnic_release_diag_lock(adapter);
+ return ret;
+}
+
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
+
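+/* Put the port into HSS (internal) or external loopback by updating the
+ * port configuration, then wait for the IDC completion AEN before adding
+ * the test MAC filter.
+ */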
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
+ int status = 0, loop = 0;
+
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status)
+ return status;
+
+ config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_err(netdev,
+ "Failed to Set Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+			netdev_info(netdev,
+				    "Device is resetting, freeing LB test resources\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EBUSY;
+ }
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ return -ETIMEDOUT;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_ADD);
+ return status;
+}
+
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
+ struct net_device *netdev = adapter->netdev;
+ int status = 0, loop = 0;
+
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_err(netdev,
+ "Failed to Clear Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+			netdev_info(netdev,
+				    "Device is resetting, freeing LB test resources\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EBUSY;
+ }
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -ETIMEDOUT;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_DEL);
+ return status;
+}
+
+static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+ int mode)
+{
+ int err;
+ u32 temp = 0, temp_ip;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_IP_ADDR);
+ if (err)
+ return;
+
+ qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
+
+ if (mode == QLCNIC_IP_UP)
+ cmd.req.arg[1] = 1 | temp;
+ else
+ cmd.req.arg[1] = 2 | temp;
+
+	/*
+	 * The adapter expects the IP address in network byte order, but the
+	 * mailbox registers are written through writel(), which byte-swaps
+	 * the value on big-endian architectures. Apply swab32() so the two
+	 * swaps cancel out and the adapter still sees network byte order.
+	 */
+
+ temp_ip = swab32(ntohl(ip));
+ memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(&adapter->netdev->dev,
+			"could not send %s IP 0x%x request\n",
+ (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+ int err;
+ u32 temp, arg1;
+ struct qlcnic_cmd_args cmd;
+ int lro_bit_mask;
+
+ lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id << 16;
+ arg1 = lro_bit_mask | temp;
+ cmd.req.arg[1] = arg1;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "LRO config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
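+/* Enable or disable RSS for the receive context, using a fixed hash key
+ * and a default indirection table mask.
+ */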
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 word;
+ struct qlcnic_cmd_args cmd;
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+ if (err)
+ return err;
+ /*
+ * RSS request:
+ * bits 3-0: Rsvd
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 16-31: indirection table mask
+ */
+ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u32)(enable & 0x1) << 8) |
+ ((0x7ULL) << 16);
+ cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+ cmd.req.arg[2] = word;
+ memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_info(&adapter->pdev->dev, "RSS config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
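+/* Add or delete a MAC/VLAN filter through the CONFIG_MAC_VLAN mailbox
+ * command. Issued as a no-wait command, so completion is asynchronous.
+ */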
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ u16 vlan_id, u8 op)
+{
+ struct qlcnic_cmd_args *cmd = NULL;
+ struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+
+ if (vlan_id)
+ op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+
+ cmd->req.arg[1] = op | (1 << 8);
+ qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
+ cmd->req.arg[1] |= temp;
+ mv.vlan = vlan_id;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd->req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
+ return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan_id)
+{
+ u8 mac[ETH_ALEN];
+ memcpy(&mac, addr, ETH_ALEN);
+ qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
+{
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+ memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+ break;
+ }
+ cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
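+
+	/* The function argument is unused; the request carries only the
+	 * QLCNIC_GET_CURRENT_MAC type.
+	 */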
+ function = 0;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ if (err)
+ return err;
+
+ qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+		dev_err(&adapter->pdev->dev, "Failed to get MAC address %d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Rx coalescing parameters\n");
+
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Tx coalescing parameters\n");
+
+ return err;
+}
+
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = QLCNIC_INTR_DEFAULT;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+
+ switch (coal->type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_RX_TX:
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ break;
+ default:
+ err = -EINVAL;
+ netdev_err(adapter->netdev,
+			   "Invalid interrupt coalescing type\n");
+ break;
+ }
+
+ return err;
+}
+
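+/* Decode a link AEN payload: data[2] carries the link speed, data[3] the
+ * link status, duplex, autoneg and module type, and data[4] the loopback
+ * mode.
+ */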
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u8 link_status, duplex;
+
+	link_status = LSB(data[3]) & 1;
+ if (link_status) {
+ ahw->link_speed = MSW(data[2]);
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ ahw->link_duplex = DUPLEX_FULL;
+ else
+ ahw->link_duplex = DUPLEX_HALF;
+ } else {
+ ahw->link_speed = SPEED_UNKNOWN;
+ ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+
+ ahw->link_autoneg = MSB(MSW(data[3]));
+ ahw->module_type = MSB(LSW(data[3]));
+ ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
+ qlcnic_advert_link_change(adapter, link_status);
+}
+
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_mailbox *mbx;
+ u32 mask, resp, event;
+ unsigned long flags;
+
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ __qlcnic_83xx_process_aen(adapter);
+ else
+ qlcnic_83xx_notify_mbx_response(mbx);
+
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ return IRQ_HANDLED;
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int i, err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+			"%s: Error, invoked by non-management func\n",
+ __func__);
+ return err;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (nic->pci_func << 16);
+ cmd.req.arg[2] = 0x1 << 16;
+ cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+ cmd.req.arg[4] = nic->capabilities;
+ cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+ cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+ cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+ for (i = 8; i < 32; i++)
+ cmd.req.arg[i] = 0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev, "Failed to set NIC info %d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ u32 temp;
+ u8 op = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
+ if (func_id != ahw->pci_func) {
+ temp = func_id << 16;
+ cmd.req.arg[1] = op | BIT_31 | temp;
+ } else {
+ cmd.req.arg[1] = ahw->pci_func << 16;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to get nic info %d\n", err);
+ goto out;
+ }
+
+ npar_info->op_type = cmd.rsp.arg[1];
+ npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+ npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+ npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+ npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+ npar_info->capabilities = cmd.rsp.arg[4];
+ npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+ npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+ npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+ npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+ npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+ npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+ if (cmd.rsp.arg[8] & 0x1)
+ npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+ if (cmd.rsp.arg[8] & 0x10000) {
+ temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+ npar_info->max_linkspeed_reg_offset = temp;
+ }
+
+ memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+ sizeof(ahw->extra_capability));
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0, j = 0;
+ u32 temp;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ ahw->total_nic_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
+ pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+ pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
+ pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->default_port = temp;
+ i++;
+ pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->tx_max_bw = temp;
+ i = i + 2;
+ memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+ i++;
+ memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+ i = i + 3;
+ }
+ } else {
+ dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
+ err = -EIO;
+ }
+
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
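+/* Add (op_type set) or delete all configured interrupts with the firmware
+ * and update the driver's interrupt table from the response.
+ */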
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+ int i, index, err;
+ u8 max_ints;
+ u32 val, temp, type;
+ struct qlcnic_cmd_args cmd;
+
+ max_ints = adapter->ahw->num_msix - 1;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = max_ints;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
+
+ for (i = 0, index = 2; i < max_ints; i++) {
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (adapter->ahw->intr_tbl[i].type << 4);
+ if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (adapter->ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[index++] = val;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts 0x%x\n", err);
+ goto out;
+ }
+
+ max_ints = cmd.rsp.arg[1];
+ for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+ val = cmd.rsp.arg[index];
+ if (LSB(val)) {
+ dev_info(&adapter->pdev->dev,
+ "Can't configure interrupt %d\n",
+ adapter->ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ adapter->ahw->intr_tbl[i].id = MSW(val);
+ adapter->ahw->intr_tbl[i].enabled = 1;
+ temp = cmd.rsp.arg[index + 1];
+ adapter->ahw->intr_tbl[i].src = temp;
+ } else {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
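+/* Acquire the flash hardware semaphore shared between PCI functions,
+ * polling up to QLC_83XX_FLASH_LOCK_TIMEOUT times before giving up.
+ */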
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+ int id, timeout = 0;
+ u32 status = 0;
+
+ while (status == 0) {
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (status)
+ break;
+
+ if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+ id = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FLASH_LOCK_OWNER);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, lock held by %d\n", __func__, id);
+ return -EIO;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+ return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+ u32 flash_addr, u8 *p_data,
+ int count)
+{
+ u32 word, range, flash_offset, addr = flash_addr, ret;
+ ulong indirect_add, direct_window;
+ int i, err = 0;
+
+ flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr & 0xFFFF0000));
+
+ range = flash_offset + (count * sizeof(u32));
+ /* Check if data is spread across multiple sectors */
+ if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+ /* This write is needed once for each sector */
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ direct_window,
+ (addr));
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+ return 0;
+}
+
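+/* Poll the flash status register until the ready bit is set or the retry
+ * count is exhausted.
+ */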
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+ int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+ int err = 0;
+
+ do {
+ status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
+ if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+ QLC_83XX_FLASH_STATUS_READY)
+ break;
+
+ usleep_range(1000, 1100);
+ } while (--retries);
+
+ if (!retries)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
+{
+	int ret;
+	u32 cmd;
+
+	cmd = adapter->ahw->fdt.write_statusreg_cmd;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_enable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+ adapter->ahw->fdt.write_statusreg_cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_disable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+ int ret, err = 0;
+ u32 mfg_id;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return err;
+ }
+
+ adapter->flash_mfg_id = (mfg_id & 0xFF);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+ int count, fdt_size, ret = 0;
+
+ fdt_size = sizeof(struct qlcnic_fdt);
+ count = fdt_size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ memset(&adapter->ahw->fdt, 0, fdt_size);
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+ (u8 *)&adapter->ahw->fdt,
+ count);
+ qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
+ qlcnic_83xx_unlock_flash(adapter);
+ return ret;
+}
+
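+/* Erase one flash sector. The sector address is byte-swapped into the
+ * ordering the flash controller expects, and write access is enabled and
+ * disabled around the erase when the part matches the FDT manufacturer ID.
+ */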
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+ u32 sector_start_addr)
+{
+ u32 reversed_addr, addr1, addr2, cmd;
+ int ret = -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+				"%s: failed at %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ addr1 = (sector_start_addr & 0xFF) << 16;
+ addr2 = (sector_start_addr & 0xFF0000) >> 16;
+ reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ reversed_addr);
+ cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_ERASE_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data)
+{
+ int ret = -EIO;
+ u32 addr1 = 0x00800000 | (addr >> 2);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
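+/* Burst-write between QLC_83XX_FLASH_WRITE_MIN and QLC_83XX_FLASH_WRITE_MAX
+ * dwords using the SPI first/second/last write patterns, polling the flash
+ * status register after each dword.
+ */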
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data, int count)
+{
+ u32 temp;
+ int ret = -EIO, err = 0;
+
+ if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_WRITE_MAX)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid word count\n", __func__);
+ return -EIO;
+ }
+
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
+ /* First DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_FIRST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ count--;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+ /* Second to N-1 DWORD writes */
+ while (count != 1) {
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ count--;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL |
+ (addr >> 2));
+ /* Last DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
+ if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+ dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+ __func__, __LINE__);
+ /* Operation failed, clear error bit */
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ }
+
+ return 0;
+}
+
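+/* Recover a driver lock abandoned by its owner: claim the recovery state,
+ * force-release the lock and clear the recovery bits, unless another
+ * function has already started recovery.
+ */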
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+ u32 val, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+	/* Check if recovery needs to be performed by the calling function */
+ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+ val = val & ~0x3F;
+ val = val | ((adapter->portnum << 2) |
+ QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated\n", __func__);
+ msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+ id = ((val >> 2) & 0xF);
+ if (id == adapter->portnum) {
+ val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+ val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ /* Force release the lock */
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+ /* Clear recovery bits */
+ val = val & ~0x3F;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery completed\n", __func__);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: func %d to resume lock recovery process\n",
+ __func__, id);
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated by other functions\n",
+ __func__);
+ }
+}
+
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+ int max_attempt = 0;
+
+ while (status == 0) {
+ status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+ if (status)
+ break;
+
+ msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ i++;
+
+ if (i == 1)
+ temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+ if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ if (val == temp) {
+ id = val & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: lock to be recovered from %d\n",
+ __func__, id);
+ qlcnic_83xx_recover_driver_lock(adapter);
+ i = 0;
+ max_attempt++;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+		/* Force exit from the while loop after a few attempts */
+ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ lock_alive_counter = val >> 8;
+ lock_alive_counter++;
+ val = lock_alive_counter << 8 | adapter->portnum;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+ return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 val, lock_alive_counter, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = val & 0xFF;
+ lock_alive_counter = val >> 8;
+
+ if (id != adapter->portnum)
+ dev_err(&adapter->pdev->dev,
+			"%s: Warning: func %d is unlocking lock owned by %d\n",
+ __func__, adapter->portnum, id);
+
+ val = (lock_alive_counter << 8) | 0xFF;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
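+/* Write 'count' 128-bit chunks to adapter memory (QDR or DDR ranges only)
+ * through the indirect MS registers. The destination address must be
+ * 16-byte aligned.
+ */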
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+ u32 *data, u32 count)
+{
+ int i, j, ret = 0;
+ u32 temp;
+
+ /* Check alignment */
+ if (addr & 0xF)
+ return -EIO;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) ||
+ (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failure */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_WARNING
+ "MS memory write failed\n");
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+ u8 *p_data, int count)
+{
+ u32 word, addr = flash_addr, ret;
+ ulong indirect_addr;
+ int i, err = 0;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr))) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_addr, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
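+/* Query the firmware for link state, speed and SFP module type, updating
+ * ahw accordingly. Returns the raw link status word (0 when the link is
+ * down) or a negative error code.
+ */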
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+ u8 pci_func;
+ int err;
+ u32 config = 0, state;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ pci_func = adapter->portnum;
+ else
+ pci_func = ahw->pci_func;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
+ dev_info(&adapter->pdev->dev, "link state down\n");
+ return config;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[1];
+ switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+ case QLC_83XX_10M_LINK:
+ ahw->link_speed = SPEED_10;
+ break;
+ case QLC_83XX_100M_LINK:
+ ahw->link_speed = SPEED_100;
+ break;
+ case QLC_83XX_1G_LINK:
+ ahw->link_speed = SPEED_1000;
+ break;
+ case QLC_83XX_10G_LINK:
+ ahw->link_speed = SPEED_10000;
+ break;
+ default:
+ ahw->link_speed = 0;
+ break;
+ }
+ config = cmd.rsp.arg[3];
+ switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
+ case QLC_83XX_MODULE_FIBRE_10GBASE_LRM:
+ case QLC_83XX_MODULE_FIBRE_10GBASE_LR:
+ case QLC_83XX_MODULE_FIBRE_10GBASE_SR:
+ ahw->supported_type = PORT_FIBRE;
+ ahw->port_type = QLCNIC_XGBE;
+ break;
+ case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
+ ahw->supported_type = PORT_FIBRE;
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLC_83XX_MODULE_TP_1000BASE_T:
+ ahw->supported_type = PORT_TP;
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP:
+ case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP:
+ case QLC_83XX_MODULE_DA_10GE_LEGACY_CP:
+ case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP:
+ ahw->supported_type = PORT_DA;
+ ahw->port_type = QLCNIC_XGBE;
+ break;
+ default:
+ ahw->supported_type = PORT_OTHER;
+ ahw->port_type = QLCNIC_XGBE;
+ }
+ if (config & 1)
+ err = 1;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return config;
+}
+
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = 0;
+ int status = 0;
+
+ if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ }
+
+	/* Hard-coded until the board type can be read from flash */
+ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+ ecmd->duplex = ahw->link_duplex;
+ ecmd->autoneg = ahw->link_autoneg;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+ ecmd->supported = (SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_Autoneg);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ahw->port_config & QLC_83XX_10_CAPABLE)
+ ecmd->advertising |= SUPPORTED_10baseT_Full;
+ if (ahw->port_config & QLC_83XX_100_CAPABLE)
+ ecmd->advertising |= SUPPORTED_100baseT_Full;
+ if (ahw->port_config & QLC_83XX_1G_CAPABLE)
+ ecmd->advertising |= SUPPORTED_1000baseT_Full;
+ if (ahw->port_config & QLC_83XX_10G_CAPABLE)
+ ecmd->advertising |= SUPPORTED_10000baseT_Full;
+ if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE)
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ } else {
+ switch (ahw->link_speed) {
+ case SPEED_10:
+ ecmd->advertising = SUPPORTED_10baseT_Full;
+ break;
+ case SPEED_100:
+ ecmd->advertising = SUPPORTED_100baseT_Full;
+ break;
+ case SPEED_1000:
+ ecmd->advertising = SUPPORTED_1000baseT_Full;
+ break;
+ case SPEED_10000:
+ ecmd->advertising = SUPPORTED_10000baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+
+ switch (ahw->supported_type) {
+ case PORT_FIBRE:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case PORT_TP:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ case PORT_DA:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_OTHER;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ ecmd->phy_address = ahw->physical_port;
+ return status;
+}
+
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = adapter->ahw->port_config;
+ int status = 0;
+
+ /* 83xx devices do not support Half duplex */
+ if (ecmd->duplex == DUPLEX_HALF) {
+ netdev_info(adapter->netdev,
+ "Half duplex mode not supported\n");
+ return -EINVAL;
+ }
+
+ if (ecmd->autoneg) {
+ ahw->port_config |= QLC_83XX_AUTONEG_ENABLE;
+ ahw->port_config |= (QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ } else { /* force speed */
+ ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE;
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ ahw->port_config &= ~(QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_10_CAPABLE;
+ break;
+ case SPEED_100:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_100_CAPABLE;
+ break;
+ case SPEED_1000:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_100_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_1G_CAPABLE;
+ break;
+ case SPEED_10000:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE);
+ ahw->port_config |= QLC_83XX_10G_CAPABLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_info(adapter->netdev,
+			    "Failed to set link speed and autoneg.\n");
+ ahw->port_config = config;
+ }
+
+ return status;
+}
+
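+/* Copy one 64-bit counter (two consecutive 32-bit response words) into the
+ * stats buffer and return the advanced buffer pointer.
+ */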
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+ u64 *data, int index)
+{
+ u32 low, hi;
+ u64 val;
+
+ low = cmd->rsp.arg[index];
+ hi = cmd->rsp.arg[index + 1];
+ val = (((u64) low) | (((u64) hi) << 32));
+ *data++ = val;
+ return data;
+}
+
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd, u64 *data,
+ int type, int *ret)
+{
+ int err, k, total_regs;
+
+ *ret = 0;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Error in get statistics mailbox command\n");
+ *ret = -EIO;
+ return data;
+ }
+ total_regs = cmd->rsp.num;
+ switch (type) {
+ case QLC_83XX_STAT_MAC:
+ /* fill in MAC tx counters */
+ for (k = 2; k < 28; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx counters */
+ for (k += 6; k < 60; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx frame stats */
+ for (k += 6; k < 80; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* fill in eSwitch stats */
+ for (; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_RX:
+ for (k = 2; k < 8; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < 24; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes containing RE1FBQ error data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_TX:
+ for (k = 2; k < 10; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+ *ret = -EIO;
+ }
+ return data;
+}
+
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+ struct qlcnic_cmd_args cmd;
+ struct net_device *netdev = adapter->netdev;
+ int ret = 0;
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+ if (ret)
+ return;
+ /* Get Tx stats */
+ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+ cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_TX, &ret);
+ if (ret) {
+ netdev_err(netdev, "Error getting Tx stats\n");
+ goto out;
+ }
+ /* Get MAC stats */
+ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+ cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_MAC, &ret);
+ if (ret) {
+ netdev_err(netdev, "Error getting MAC stats\n");
+ goto out;
+ }
+ /* Get Rx stats */
+ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+ cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_RX, &ret);
+ if (ret)
+ netdev_err(netdev, "Error getting Rx stats\n");
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+ u32 major, minor, sub;
+
+ major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+ dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+ return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+ sizeof(*adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(*adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1;
+ j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+ for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+ regs_buff[i++] = QLCRDX(adapter->ahw, j);
+ return i;
+}
+
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ u32 data;
+ u16 intrpt_id, id;
+ int ret;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
+ if (qlcnic_get_diag_lock(adapter)) {
+ netdev_info(netdev, "Device in diagnostics mode\n");
+ return -EBUSY;
+ }
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+ drv_sds_rings);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ if (ret)
+ goto fail_diag_irq;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[0].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
+ id = LSW(data);
+ val = LSB(MSW(data));
+ if (id != intrpt_id)
+ dev_info(&adapter->pdev->dev,
+			 "Interrupt generated: 0x%x, requested: 0x%x\n",
+ id, intrpt_id);
+ if (val)
+ dev_err(&adapter->pdev->dev,
+ "Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_irq:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ qlcnic_release_diag_lock(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed\n", __func__);
+ return;
+ }
+ config = ahw->port_config;
+ if (config & QLC_83XX_CFG_STD_PAUSE) {
+ switch (MSW(config)) {
+ case QLC_83XX_TX_PAUSE:
+ pause->tx_pause = 1;
+ break;
+ case QLC_83XX_RX_PAUSE:
+ pause->rx_pause = 1;
+ break;
+ case QLC_83XX_TX_RX_PAUSE:
+ default:
+ /* Backward compatibility for existing
+ * flash definitions
+ */
+ pause->tx_pause = 1;
+ pause->rx_pause = 1;
+ }
+ }
+
+ if (QLC_83XX_AUTONEG(config))
+ pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed.\n", __func__);
+ return status;
+ }
+ config = ahw->port_config;
+
+ if (ahw->port_type == QLCNIC_GBE) {
+ if (pause->autoneg)
+ ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+ if (!pause->autoneg)
+ ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+ } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (!(config & QLC_83XX_CFG_STD_PAUSE))
+ ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+ if (pause->rx_pause && pause->tx_pause) {
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ } else if (pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+ } else if (pause->tx_pause && !pause->rx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+ } else if (!pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+ QLC_83XX_CFG_STD_PAUSE);
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Set Pause Config failed.\n", __func__);
+ ahw->port_config = config;
+ }
+ return status;
+}
+
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ int ret, err = 0;
+ u32 temp;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_READ_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO)
+ return err;
+
+ return temp & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_read_flash_status_reg(adapter);
+ if (status == -EIO) {
+ dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int err = 0;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ return err;
+
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ qlcnic_83xx_set_vnic_opmode(adapter);
+ } else {
+ err = qlcnic_83xx_check_vnic_state(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ err = qlcnic_83xx_idc_reattach_driver(adapter);
+ if (err)
+ return err;
+
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ idc->delay);
+ return err;
+}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ reinit_completion(&mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ if (!mbx)
+ return;
+
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
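+/* Pass mailbox ownership: tell the firmware a request is posted, or
+ * clear the firmware mailbox control register to acknowledge a
+ * completed response.
+ */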
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
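+/* Copy a command into the host mailbox registers. For back-channel
+ * (BC) operations, words 0 and 1 carry the HAL version, total size
+ * and, on the PF, the destination function number, followed by the
+ * BC header and payload.
+ */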
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+ /* Back channel specific operations bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (!mbx)
+ return;
+
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
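+/* Queue a command for the mailbox worker. The returned timeout
+ * scales with the current queue depth, so a command queued behind
+ * others is given proportionally more time to complete.
+ */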
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
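+/* Copy the firmware's response registers into the command and map
+ * the mailbox status field onto a driver return code, treating some
+ * benign MAC/VLAN configuration results as success.
+ */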
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+}
+
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 offset;
+
+ offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+ readl(ahw->pci_base0 + offset),
+ QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+ QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+ QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
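+/* Worker that drains the mailbox command queue one command at a
+ * time: encode the request, signal the firmware, wait for the
+ * response and decode it. On a timeout the mailbox is marked not
+ * ready and a firmware dump/reset is requested.
+ */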
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ atomic_t *rsp_status = &mbx->rsp_status;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ ahw = adapter->ahw;
+
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+ spin_lock(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mailbox_registers(adapter);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+	if (!mbx->work_q) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_83xx_aer_stop_poll_work(adapter);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err = 0;
+
+ pdev->error_state = pci_channel_io_normal;
+ err = pci_enable_device(pdev);
+ if (err)
+ goto disconnect;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = qlcnic_83xx_aer_reset(adapter);
+ if (err == 0)
+ return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_83xx_aer_start_poll_work(adapter);
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644
index 000000000..69f828eb4
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -0,0 +1,661 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "qlcnic_hw.h"
+
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE 0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE 0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE 0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR 10
+#define QLC_83xx_FUNC_VAL(v, f)		((v) & (1 << ((f) * 4)))
+#define QLC_83XX_INTX_PTR 0x38C0
+#define QLC_83XX_INTX_TRGR 0x38C4
+#define QLC_83XX_INTX_MASK 0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
+#define QLC_83XX_LB_WAIT_COUNT 250
+#define QLC_83XX_LB_MSLEEP_COUNT 20
+#define QLC_83XX_NO_NIC_RESOURCE 0x5
+#define QLC_83XX_MAC_PRESENT 0xC
+#define QLC_83XX_MAC_ABSENT 0xD
+
+
+#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE 0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100
+#define QLC_83XX_LEGACY_INTX_DELAY 4
+#define QLC_83XX_REG_DESC 1
+#define QLC_83XX_LRO_DESC 2
+#define QLC_83XX_CTRL_DESC 3
+#define QLC_83XX_FW_CAPABILITY_TSO BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0
+#define QLC_83XX_HOST_SDS_MBX_IDX 8
+
+#define QLCNIC_HOST_RDS_MBX_IDX 88
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG 0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388
+#define QLC_83XX_PORT0_TC_STATS 0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS 0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0 0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1 0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2 0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3 0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_83XX_POST_FW_FILE_NAME "83xx_post_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH 0
+#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+
+#define QLC_FW_FILE_NAME_LEN 20
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+#define QLC_83XX_MBX_POST_BC_OP 0x1
+#define QLC_83XX_MBX_COMPLETION 0x0
+#define QLC_83XX_MBX_REQUEST 0x1
+
+#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP 5000000
+
+/* status descriptor mailbox data
+ * @phy_addr_{low|high}: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u32 phy_addr_low;
+ u32 phy_addr_high;
+ u32 rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
+ u16 sds_ring_size;
+ u16 rsvd2;
+ u16 rsvd3[2];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd4;
+#elif defined(__BIG_ENDIAN)
+ u16 rsvd2;
+ u16 sds_ring_size;
+ u16 rsvd3[2];
+ u8 rsvd4;
+ u8 intrpt_val;
+ u16 intrpt_id;
+#endif
+ u32 rsvd5;
+} __packed;
+
+/* receive descriptor buffer data
+ * phy_addr_reg_{low|high}: physical address of regular buffer
+ * phy_addr_jmb_{low|high}: physical address of jumbo buffer
+ * reg_ring_sz: size of regular buffer
+ * reg_ring_len: no. of entries in regular buffer
+ * jmb_ring_len: no. of entries in jumbo buffer
+ * jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u32 phy_addr_reg_low;
+ u32 phy_addr_reg_high;
+ u32 phy_addr_jmb_low;
+ u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+ u16 reg_ring_len;
+ u16 reg_ring_sz;
+ u16 jmb_ring_len;
+ u16 jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @context_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+ u8 vport_id;
+ u8 phy_port;
+ u8 num_pci_func;
+ u8 state;
+#endif
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+#endif
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u32 phys_addr_low;
+ u32 phys_addr_high;
+ u32 cnsmr_index_low;
+ u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+ u16 intr_id;
+ u16 size;
+ u8 rsvd[3];
+ u8 src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 state;
+ u16 ctx_id;
+#endif
+} __packed;
+
+struct qlcnic_intrpt_config {
+ u8 type;
+ u8 enabled;
+ u16 id;
+ u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+#if defined(__LITTLE_ENDIAN)
+ u8 mac_addr0;
+ u8 mac_addr1;
+ u8 mac_addr2;
+ u8 mac_addr3;
+ u8 mac_addr4;
+ u8 mac_addr5;
+ u16 vlan;
+#elif defined(__BIG_ENDIAN)
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
+ u16 vlan;
+ u8 mac_addr5;
+ u8 mac_addr4;
+#endif
+};
+
+struct qlc_83xx_fw_info {
+ const struct firmware *fw;
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
+};
+
+struct qlc_83xx_reset {
+ struct qlc_83xx_reset_hdr *hdr;
+ int seq_index;
+ int seq_error;
+ int array_index;
+ u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+ u8 *buff;
+ u8 *stop_offset;
+ u8 *start_offset;
+ u8 *init_offset;
+ u8 seq_end;
+ u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
+#define QLC_83XX_IDC_TIMESTAMP 0
+#define QLC_83XX_IDC_DURATION 1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20
+#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH 2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16
+#define QLC_83XX_IDC_MAJOR_VERSION 1
+#define QLC_83XX_IDC_MINOR_VERSION 0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
+
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+struct qlc_83xx_idc {
+ int (*state_entry) (struct qlcnic_adapter *);
+ u64 sec_counter;
+ u64 delay;
+ unsigned long status;
+ int err_code;
+ int collect_dump;
+ u8 curr_state;
+ u8 prev_state;
+ u8 vnic_state;
+ u8 vnic_wait_limit;
+ u8 quiesce_req;
+ u8 delay_reset;
+ char **name;
+};
+
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
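+/* Host-to-firmware mailbox registers start at the beginning of BAR0;
+ * the firmware-to-host mirror starts at offset 0x800.
+ */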
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN 3
+#define QLC_83XX_MBX_AEN_CNT 5
+#define QLC_83XX_MODULE_LOADED 1
+#define QLC_83XX_MBX_READY 2
+#define QLC_83XX_MBX_AEN_ACK 3
+#define QLC_83XX_SFP_PRESENT(data) ((data) & 3)
+#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
+#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
+#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE 0x10
+#define QLC_83XX_RX_PAUSE 0x20
+#define QLC_83XX_TX_RX_PAUSE 0x30
+#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_BEACON_ON 1
+#define QLC_83XX_BEACON_OFF 0
+#define QLC_83XX_LED_RATE 0xff
+#define QLC_83XX_LED_ACT (1 << 10)
+#define QLC_83XX_LED_MOD (0 << 13)
+#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+ QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK 1
+#define QLC_83XX_100M_LINK 2
+#define QLC_83XX_1G_LINK 3
+#define QLC_83XX_10G_LINK 4
+#define QLC_83XX_STAT_TX 3
+#define QLC_83XX_STAT_RX 2
+#define QLC_83XX_STAT_MAC 1
+#define QLC_83XX_TX_STAT_REGS 14
+#define QLC_83XX_RX_STAT_REGS 40
+#define QLC_83XX_MAC_STAT_REGS 94
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN)	(0x3 & ((VAL) >> ((FN) * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN)	((VAL) << ((FN) * 2))
+#define QLC_83XX_DEFAULT_OPMODE 0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC 0x1
+#define QLC_83XX_VIRTUAL_FUNC 0x2
+
+#define QLC_83XX_LB_MAX_FILTERS 2048
+#define QLC_83XX_LB_BUCKET_SIZE 256
+#define QLC_83XX_MINIMUM_VECTOR 3
+#define QLC_83XX_MAX_MC_COUNT 38
+#define QLC_83XX_MAX_UC_COUNT 4096
+
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
+#define QLC_83XX_SRIOV_MODE 0x1
+#define QLCNIC_BRDTYPE_83XX_10G 0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLC_83XX_FLASH_STATUS 0x42100004
+#define QLC_83XX_FLASH_CONTROL 0x42110004
+#define QLC_83XX_FLASH_ADDR 0x42110008
+#define QLC_83XX_FLASH_WRDATA 0x4211000C
+#define QLC_83XX_FLASH_RDDATA 0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA)	(0x42150000 | (0x0000FFFF & (DATA)))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
+#define QLC_83XX_FLASH_STATUS_READY 0x6
+#define QLC_83XX_FLASH_WRITE_MIN 2
+#define QLC_83XX_FLASH_WRITE_MAX 64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLC_83XX_ERASE_MODE 1
+#define QLC_83XX_WRITE_MODE 2
+#define QLC_83XX_BULK_WRITE_MODE 3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF 0x0
+#define QLC_83XX_FLASH_READ_CTRL 0x3F
+#define QLC_83XX_FLASH_SPI_CTRL 0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+
+enum qlc_83xx_mbx_cmd_type {
+ QLC_83XX_MBX_CMD_WAIT = 0,
+ QLC_83XX_MBX_CMD_NO_WAIT,
+ QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+ QLC_83XX_MBX_RESPONSE_WAIT = 0,
+ QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+ QLCNIC_GLOBAL_RESET = 0,
+ QLCNIC_WILDCARD,
+ QLCNIC_INFORMANT,
+ QLCNIC_HOST_MBX_CTRL,
+ QLCNIC_FW_MBX_CTRL,
+ QLCNIC_BOOTLOADER_ADDR,
+ QLCNIC_BOOTLOADER_SIZE,
+ QLCNIC_FW_IMAGE_ADDR,
+ QLCNIC_MBX_INTR_ENBL,
+ QLCNIC_DEF_INT_MASK,
+ QLCNIC_DEF_INT_ID,
+ QLC_83XX_IDC_MAJ_VERSION,
+ QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DRV_PRESENCE,
+ QLC_83XX_IDC_DRV_ACK,
+ QLC_83XX_IDC_CTRL,
+ QLC_83XX_IDC_DRV_AUDIT,
+ QLC_83XX_IDC_MIN_VERSION,
+ QLC_83XX_RECOVER_DRV_LOCK,
+ QLC_83XX_IDC_PF_0,
+ QLC_83XX_IDC_PF_1,
+ QLC_83XX_IDC_PF_2,
+ QLC_83XX_IDC_PF_3,
+ QLC_83XX_IDC_PF_4,
+ QLC_83XX_IDC_PF_5,
+ QLC_83XX_IDC_PF_6,
+ QLC_83XX_IDC_PF_7,
+ QLC_83XX_IDC_PF_8,
+ QLC_83XX_IDC_PF_9,
+ QLC_83XX_IDC_PF_10,
+ QLC_83XX_IDC_PF_11,
+ QLC_83XX_IDC_PF_12,
+ QLC_83XX_IDC_PF_13,
+ QLC_83XX_IDC_PF_14,
+ QLC_83XX_IDC_PF_15,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+ QLC_83XX_DRV_OP_MODE,
+ QLC_83XX_VNIC_STATE,
+ QLC_83XX_DRV_LOCK,
+ QLC_83XX_DRV_UNLOCK,
+ QLC_83XX_DRV_LOCK_ID,
+ QLC_83XX_ASIC_TEMP,
+};
+
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO BIT_29
+#define QLC_INIT_FW_RESOURCES BIT_31
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+ struct qlcnic_info *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+ const struct pci_device_id *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+ u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *, int);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644
index 000000000..33669c29b
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -0,0 +1,2616 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION 0x0101
+
+#define QLC_83XX_OPCODE_NOP 0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST 0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002
+#define QLC_83XX_OPCODE_POLL_LIST 0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020
+#define QLC_83XX_OPCODE_SEQ_END 0x0040
+#define QLC_83XX_OPCODE_TMPL_END 0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+
+/* EPORT control registers */
+#define QLC_83XX_RESET_CONTROL 0x28084E50
+#define QLC_83XX_RESET_REG 0x28084E60
+#define QLC_83XX_RESET_PORT0 0x28084E70
+#define QLC_83XX_RESET_PORT1 0x28084E80
+#define QLC_83XX_RESET_PORT2 0x28084E90
+#define QLC_83XX_RESET_PORT3 0x28084EA0
+#define QLC_83XX_RESET_SRESHIM 0x28084EB0
+#define QLC_83XX_RESET_EPGSHIM 0x28084EC0
+#define QLC_83XX_RESET_ETHERPCS 0x28084ED0
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u16 version;
+ u16 signature;
+ u16 size;
+ u16 entries;
+ u16 hdr_size;
+ u16 checksum;
+ u16 init_offset;
+ u16 start_offset;
+#elif defined(__BIG_ENDIAN)
+ u16 signature;
+ u16 version;
+ u16 entries;
+ u16 size;
+ u16 checksum;
+ u16 hdr_size;
+ u16 start_offset;
+ u16 init_offset;
+#endif
+} __packed;
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+#elif defined(__BIG_ENDIAN)
+ u16 size;
+ u16 cmd;
+ u16 delay;
+ u16 count;
+#endif
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+ u32 mask;
+ u32 status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+ u32 mask;
+ u32 xor_value;
+ u32 or_value;
+#if defined(__LITTLE_ENDIAN)
+ u8 shl;
+ u8 shr;
+ u8 index_a;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 index_a;
+ u8 shr;
+ u8 shl;
+#endif
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+ u32 arg1;
+ u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+ u32 dr_addr;
+ u32 dr_value;
+ u32 ar_addr;
+ u32 ar_value;
+} __packed;
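+
+/* Human-readable names for enum qlcnic_83xx_states, indexed by state */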
+static const char *const qlc_83xx_idc_states[] = {
+ "Unknown",
+ "Cold",
+ "Init",
+ "Ready",
+ "Need Reset",
+ "Need Quiesce",
+ "Failed",
+ "Quiesce"
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+	return !!(val & 0xFFFF);
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+	u32 cur, prev;
+
+	cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+
+ dev_info(&adapter->pdev->dev,
+ "current state = %s, prev state = %s\n",
+ adapter->ahw->idc.name[cur],
+ adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+ u8 mode, int lock)
+{
+ u32 val;
+ int seconds;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+ val |= (adapter->portnum & 0xf);
+ val |= mode << 7;
+ if (mode)
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ else
+ seconds = jiffies / HZ;
+
+ val |= seconds << 8;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+ adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+ val = val & ~(0x3 << (adapter->portnum * 2));
+ val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ val = val & ~0xFF;
+ val = val | QLC_83XX_IDC_MAJOR_VERSION;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+ int status, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+ if (status)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ qlcnic_83xx_idc_update_minor_version(adapter);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u8 version;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ version = val & 0xFF;
+
+ if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+ dev_info(&adapter->pdev->dev,
+ "%s:mismatch. version 0x%x, expected version 0x%x\n",
+ __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+ /* Clear graceful reset bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+ int flag, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ if (flag)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
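+/* Return 0 while the time elapsed since the last audit-register
+ * update is within time_limit seconds, -EBUSY once it is exceeded.
+ */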
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+ int time_limit)
+{
+ u64 seconds;
+
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ if (seconds <= time_limit)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+ int timeout;
+ u32 ack, presence, val;
+
+ timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ dev_info(&adapter->pdev->dev,
+ "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+	if ((ack & presence) != presence) {
+ if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+ /* Clear functions which failed to ACK */
+ dev_info(&adapter->pdev->dev,
+ "%s: ACK wait exceeds time limit\n", __func__);
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(ack ^ presence);
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: updated drv presence reg = 0x%x\n",
+ __func__, val);
+ qlcnic_83xx_unlock_driver(adapter);
+ return 0;
+ } else {
+ return 1;
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: Reset ACK received from all functions\n",
+ __func__);
+ return 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns -EBUSY or Success (0)
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ qlcnic_down(adapter, netdev);
+ qlcnic_up(adapter, netdev);
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach net interface, stop TX and clean up resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+ int i;
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+
+ /* Disable mailbox interrupt */
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ qlcnic_down(adapter, netdev);
+ for (i = 0; i < adapter->ahw->num_msix; i++) {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_reset(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+}
+
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ qlcnic_83xx_idc_log_state_history(adapter);
+ dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_RESET);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE.
+ * Within the same class, function with lowest PCI ID assumes ownership
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+ u32 reg, reg1, reg2, i, j, owner, class;
+
+ reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+ reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+ owner = QLCNIC_TYPE_NIC;
+ i = 0;
+ j = 0;
+ reg = reg1;
+
+ do {
+ class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+ if (class == owner)
+ break;
+ if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+ reg = reg2;
+ j = 0;
+ } else {
+ j++;
+ }
+
+ if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+ if (owner == QLCNIC_TYPE_NIC)
+ owner = QLCNIC_TYPE_ISCSI;
+ else if (owner == QLCNIC_TYPE_ISCSI)
+ owner = QLCNIC_TYPE_FCOE;
+ else if (owner == QLCNIC_TYPE_FCOE)
+ return -EIO;
+ reg = reg1;
+ j = 0;
+ i = 0;
+ }
+ } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+ return i;
+}
+
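+/* Restart the firmware; on success clear the IDC registers and move
+ * the device to the READY state, on failure enter the FAILED state.
+ */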
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+ int ret = 0;
+
+ ret = qlcnic_83xx_restart_hw(adapter);
+
+ if (ret) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+ } else {
+ qlcnic_83xx_idc_clear_registers(adapter, lock);
+ ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+ }
+
+ return ret;
+}
+
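+/* Check PEG halt status for a fatal fan-failure code; firmware halts
+ * the device in that case and the adapter must be replaced.
+ */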
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "peg halt status1=0x%x\n", status);
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ qlcnic_83xx_initialize_nic(adapter, 1);
+
+ err = qlcnic_sriov_pf_reinit(adapter);
+ if (err)
+ return err;
+
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->nic_ops->init_driver(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ qlcnic_dcb_get_info(adapter->dcb);
+ qlcnic_83xx_idc_attach_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ ahw->idc.quiesce_req = 0;
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->idc.err_code = 0;
+ ahw->idc.collect_dump = 0;
+ ahw->reset_context = 0;
+ adapter->tx_timeo_cnt = 0;
+ ahw->idc.delay_reset = 0;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready-state initialization; this routine is invoked only
+ * once, from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+ /* Re-attach the device if required */
+ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure vNIC mode privileged function starts only after vNIC mode is
+ * enabled by management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Privileged function waits till mgmt function enables VNIC mode */
+ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ if (!ahw->idc.vnic_wait_limit--) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+ return -EIO;
+ } else {
+ /* Perform one time initialization from ready state */
+ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+
+			/* If the previous state is UNKNOWN, the device will
+			 * already have been attached by the init routine.
+			 */
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->idc.err_code = -EIO;
+ dev_err(&adapter->pdev->dev,
+ "%s: Device in unknown state\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state_handler
+ *
+ * @adapter: adapter structure
+ *
+ * If the HW is up and running, the device enters the READY state.
+ * If a firmware image needs to be loaded from the host, the device
+ * is forced to start from the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_83xx_idc_restart_hw(adapter, 0);
+ } else {
+ if (qlcnic_83xx_check_hw_status(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ return -EIO;
+ } else {
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * Reset owner will restart the device from this state.
+ * The device enters the failed state if it remains in this state
+ * for longer than the DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+ int timeout, ret = 0;
+ u32 owner;
+
+ timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (adapter->ahw->pci_func == owner)
+ ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+ } else {
+ ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform the actions specified by the IDC protocol after monitoring
+ * the device state and events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ int ret = 0;
+ u32 val;
+
+ /* Perform NIC configuration based ready state entry actions */
+ if (ahw->idc.state_entry(adapter))
+ return -EIO;
+
+ if (qlcnic_check_temp(adapter)) {
+ if (ahw->temp == QLCNIC_TEMP_PANIC) {
+ qlcnic_83xx_idc_check_fan_failure(adapter);
+ dev_err(&adapter->pdev->dev,
+ "Error: device temperature %d above limits\n",
+ adapter->ahw->temp);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ ret = qlcnic_83xx_check_heartbeat(adapter);
+ if (ret) {
+ adapter->flags |= QLCNIC_FW_HANG;
+ if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ } else {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ }
+ return -EIO;
+ }
+
+ if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
+ /* Move to need reset state and prepare for reset */
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ return ret;
+ }
+
+ /* Check for soft reset request */
+ if (ahw->reset_context &&
+ !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ adapter->ahw->reset_context = 0;
+ qlcnic_83xx_idc_tx_soft_reset(adapter);
+ return ret;
+ }
+
+ /* Move to need quiesce state if requested */
+ if (adapter->ahw->idc.quiesce_req) {
+ qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until either:
+ * reset request ACKs are received from all the functions, or
+ * the wait time exceeds the maximum time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ int ret = 0;
+
+ if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ if (qlcnic_check_diag_status(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Wait for diag completion\n", __func__);
+ adapter->ahw->idc.delay_reset = 1;
+ return 0;
+ } else {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ }
+ }
+
+ if (qlcnic_check_diag_status(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Wait for diag completion\n", __func__);
+ return -1;
+ } else {
+ if (adapter->ahw->idc.delay_reset) {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ adapter->ahw->idc.delay_reset = 0;
+ }
+
+ /* Check for ACK from other functions */
+ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Waiting for reset ACK\n", __func__);
+ return -1;
+ }
+ }
+
+ /* Transit to INIT state and restart the HW */
+ qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 val, owner;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_dump_fw(adapter);
+ }
+ }
+
+ netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+ __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ ahw->idc.err_code = -EIO;
+
+ return;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+ u32 state)
+{
+ u32 cur, prev, next;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+ next = state;
+
+ if ((next < QLC_83XX_IDC_DEV_COLD) ||
+ (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: curr %d, prev %d, next state %d is invalid\n",
+ __func__, cur, prev, state);
+ return 1;
+ }
+
+ if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+ (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+ if ((next != QLC_83XX_IDC_DEV_COLD) &&
+ (next != QLC_83XX_IDC_DEV_READY)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ if (next == QLC_83XX_IDC_DEV_INIT) {
+ if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+ (prev != QLC_83XX_IDC_DEV_COLD) &&
+ (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT)	(((PORT) & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
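+/* Mailbox encoding used by the VXLAN offload helpers below: arg2 packs
+ * the encap-type/match/set-dport flags in its low bits and the UDP
+ * destination port in bits 31:16 (see QLC_83XX_VXLAN_UDP_DPORT).
+ */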
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+ u16 port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+ cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+ QLC_83XX_SET_VXLAN_UDP_DPORT |
+ QLC_83XX_VXLAN_UDP_DPORT(port);
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to set VXLAN port %d in adapter\n",
+ port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+ bool state)
+{
+ u16 vxlan_port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_INGRESS_ENCAP);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+ QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to %s VXLAN parsing for port %d\n",
+ state ? "enable" : "disable", vxlan_port);
+ else
+ netdev_info(adapter->netdev,
+ "%s VXLAN parsing for port %d\n",
+ state ? "Enabled" : "Disabled", vxlan_port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+#endif
+
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_port(adapter))
+ return;
+
+ if (qlcnic_set_vxlan_parsing(adapter, true))
+ return;
+
+ adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+ } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_parsing(adapter, false))
+ return;
+
+ adapter->ahw->vxlan_port = 0;
+ adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+ }
+#endif
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll the device state periodically and perform the state-specific
+ * actions defined by the Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ u32 state;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_idc_log_state_history(adapter);
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ } else {
+ adapter->ahw->idc.curr_state = state;
+ }
+
+ switch (adapter->ahw->idc.curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ qlcnic_83xx_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ qlcnic_83xx_idc_need_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ qlcnic_83xx_idc_need_quiesce_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ qlcnic_83xx_idc_failed_state(adapter);
+ return;
+ case QLC_83XX_IDC_DEV_INIT:
+ qlcnic_83xx_idc_init_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ qlcnic_83xx_idc_quiesce_state(adapter);
+ break;
+ default:
+ qlcnic_83xx_idc_unknown_state(adapter);
+ return;
+ }
+ adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+ qlcnic_83xx_periodic_tasks(adapter);
+
+ /* Re-schedule the function */
+ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ adapter->ahw->idc.delay);
+}
+
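+/* IDC timeouts are stored in a single 32-bit flash word: bits 15:0 hold
+ * the device init timeout and bits 31:16 the reset-ack timeout
+ * (presumably in seconds, going by the *_TIMEOUT_SECS defaults used
+ * when the flash read fails).
+ */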
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+ u32 idc_params, val;
+
+ if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:failed to get IDC params from flash\n", __func__);
+ adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ } else {
+ adapter->dev_init_timeo = idc_params & 0xFFFF;
+ adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+ }
+
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+ adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ /* Check if reset recovery is disabled */
+ if (!qlcnic_auto_fw_reset) {
+ /* Propagate do not reset request to other functions */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+}
+
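+/* The first function to load owns initial bring-up: it clears the lock
+ * recovery register, publishes the IDC major version, optionally forces
+ * the COLD state when booting firmware from a host file, and cold-boots
+ * the device before re-enabling reset recovery.
+ */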
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+ u32 state, val;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EIO;
+
+ /* Clear driver lock register */
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+ if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_COLD);
+ state = QLC_83XX_IDC_DEV_COLD;
+ }
+
+ adapter->ahw->idc.curr_state = state;
+ /* First to load function should cold boot the device */
+ if (state == QLC_83XX_IDC_DEV_COLD)
+ qlcnic_83xx_idc_cold_state_handler(adapter);
+
+ /* Check if reset recovery is enabled */
+ if (qlcnic_auto_fw_reset) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+ int ret = -EIO;
+
+ qlcnic_83xx_setup_idc_parameters(adapter);
+
+ if (qlcnic_83xx_get_reset_instruction_template(adapter))
+ return ret;
+
+ if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+ if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+ return -EIO;
+ } else {
+ if (qlcnic_83xx_idc_check_major_version(adapter))
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+ return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+ int id;
+ u32 val;
+
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+
+ if (id == adapter->portnum) {
+ dev_err(&adapter->pdev->dev,
+ "%s: wait for lock recovery.. %d\n", __func__, id);
+ msleep(20);
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+ }
+
+ /* Clear driver presence bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+ u32 val;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, please retry\n", __func__);
+ return;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+ }
+
+ if (key == QLCNIC_FORCE_FW_RESET) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+ adapter->ahw->idc.collect_dump = 1;
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+}
+
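+/* Copy the bootloader from its fixed flash location into MS memory at
+ * the address/size read from the QLCNIC_BOOTLOADER_ADDR/SIZE registers.
+ * Flash is read as 32-bit words; MS memory is written in 128-bit
+ * (16-byte) chunks, so the size is rounded up to a 16-byte multiple
+ * first.
+ */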
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+ u8 *p_cache;
+ u32 src, size;
+ u64 dest;
+ int ret = -EIO;
+
+ src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+ dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+ size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+ /* Round size up to a 16-byte boundary */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ p_cache = vzalloc(size);
+ if (p_cache == NULL)
+ return -ENOMEM;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+ size / sizeof(u32));
+ if (ret) {
+ vfree(p_cache);
+ return ret;
+ }
+ /* Write to MS memory in 16-byte (128-bit) chunks */
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
+ if (ret) {
+ vfree(p_cache);
+ return ret;
+ }
+ vfree(p_cache);
+
+ return ret;
+}
+
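+/* Stage a firmware image from the host file system into MS memory. The
+ * image is stored little endian; it is converted via __le32_to_cpu() so
+ * that the later writel()-based stores come out correct on big endian
+ * hosts. The bulk is written in 16-byte chunks and any trailing
+ * remainder is zero-padded to a full chunk.
+ */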
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ const struct firmware *fw = fw_info->fw;
+ u32 dest, *p_cache, *temp;
+ int i, ret = -EIO;
+ __le32 *temp_le;
+ u8 data[16];
+ size_t size;
+ u64 addr;
+
+ temp = kzalloc(fw->size, GFP_KERNEL);
+ if (!temp) {
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ return -ENOMEM;
+ }
+
+ temp_le = (__le32 *)fw->data;
+
+ /* FW image in file is in little endian, swap the data to nullify
+ * the effect of writel() operation on big endian platform.
+ */
+ for (i = 0; i < fw->size / sizeof(u32); i++)
+ temp[i] = __le32_to_cpu(temp_le[i]);
+
+ dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+ size = (fw->size & ~0xF);
+ p_cache = temp;
+ addr = (u64)dest;
+
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+ goto exit;
+ }
+
+ /* Write any trailing remainder, zero-padded to 16 bytes */
+ if (fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (fw->size & 0xF); i++)
+ data[i] = temp[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "MS memory write failed\n");
+ goto exit;
+ }
+ }
+
+exit:
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ kfree(temp);
+
+ return ret;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+ int i, j;
+ u32 val = 0, val1 = 0, reg = 0;
+ int err = 0;
+
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_THRESHOLD;
+ }
+ for (i = 0; i < 8; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+ }
+ for (i = 0; i < 4; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_TC_STATS;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_TC_STATS;
+ }
+ for (i = 7; i >= 0; i--) {
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ QLCWR32(adapter, reg, (val | (i << 29)));
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1);
+}
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+ u32 reg = 0, i, j;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed to acquire driver lock\n", __func__);
+ return;
+ }
+
+ qlcnic_83xx_dump_pause_control_regs(adapter);
+ QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_THRESHOLD;
+
+ for (i = 0; i < 8; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x0);
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+
+ for (i = 0; i < 4; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+ }
+
+ QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+ QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+ dev_info(&adapter->pdev->dev,
+ "Disabled pause frames successfully on all ports\n");
+ qlcnic_83xx_unlock_driver(adapter);
+}
+
+static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
+{
+ QLCWR32(adapter, QLC_83XX_RESET_REG, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1);
+}
+
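+/* Detect a firmware hang by sampling the peg-alive counter: if it does
+ * not advance across QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT samples taken
+ * QLCNIC_HEARTBEAT_PERIOD_MSECS apart, the firmware is considered hung
+ * and the halt status registers are dumped.
+ */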
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+ u32 heartbeat, peg_status;
+ int retries, ret = -EIO, err = 0;
+
+ retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+ p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != p_dev->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ if (ret) {
+ dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_take_eport_out_of_reset(p_dev);
+ qlcnic_83xx_disable_pause_frames(p_dev);
+ peg_status = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_HALT_STATUS1);
+ dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n", peg_status,
+ QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
+
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&p_dev->pdev->dev,
+ "Device is being reset err code 0x00006700.\n");
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+ u32 val;
+
+ do {
+ val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+ if (val == QLC_83XX_CMDPEG_COMPLETE)
+ return 0;
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+ return -EIO;
+}
+
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+ int err;
+
+ err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+ if (err)
+ return err;
+
+ err = qlcnic_83xx_check_heartbeat(p_dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
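+/* Poll @addr until (value & @mask) == @status. The wait budget is split
+ * into @duration / 10 retries with a @duration / 10 ms sleep between
+ * reads; on timeout the reset sequence error counter is bumped.
+ */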
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+ int duration, u32 mask, u32 status)
+{
+ int timeout_error, err = 0;
+ u32 value;
+ u8 retries;
+
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
+ retries = duration / 10;
+
+ do {
+ if ((value & mask) != status) {
+ timeout_error = 1;
+ msleep(duration / 10);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+ if (timeout_error) {
+ p_dev->ahw->reset.seq_error++;
+ dev_err(&p_dev->pdev->dev,
+ "%s: Timeout Err, entry_num = %d\n",
+ __func__, p_dev->ahw->reset.seq_index);
+ dev_err(&p_dev->pdev->dev,
+ "0x%08x 0x%08x 0x%08x\n",
+ value, mask, status);
+ }
+
+ return timeout_error;
+}
+
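+/* Fold the 32-bit sum of all 16-bit template words into 16 bits,
+ * one's-complement style; a template whose stored checksum is
+ * consistent is expected to fold to 0xFFFF.
+ */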
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+ u32 sum = 0;
+ u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+ int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+ while (count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ if (~sum) {
+ return 0;
+ } else {
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+ return -1;
+ }
+}
+
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+ struct qlcnic_hardware_context *ahw = p_dev->ahw;
+ u32 addr, count, prev_ver, curr_ver;
+ u8 *p_buff;
+
+ if (ahw->reset.buff != NULL) {
+ prev_ver = p_dev->fw_version;
+ curr_ver = qlcnic_83xx_get_fw_version(p_dev);
+ if (curr_ver > prev_ver)
+ kfree(ahw->reset.buff);
+ else
+ return 0;
+ }
+
+ ahw->reset.seq_error = 0;
+ ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+ if (p_dev->ahw->reset.buff == NULL)
+ return -ENOMEM;
+
+ p_buff = p_dev->ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+ count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+ /* Copy template header from flash */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+ ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+ p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+ /* Copy rest of the template */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+
+ if (qlcnic_83xx_reset_template_checksum(p_dev))
+ return -EIO;
+ /* Get Stop, Start and Init command offsets */
+ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+ ahw->reset.start_offset = ahw->reset.buff +
+ ahw->reset.hdr->start_offset;
+ ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr)
+{
+ int err = 0;
+ u32 value;
+
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
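+/* Operation order: the source value (either the register at @raddr or a
+ * previously captured value from reset.array[index_a]) is masked,
+ * shifted left, shifted right, OR-ed and XOR-ed with the template
+ * constants, then written to @waddr.
+ */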
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr,
+ struct qlc_83xx_rmw *p_rmw_hdr)
+{
+ int err = 0;
+ u32 value;
+
+ if (p_rmw_hdr->index_a) {
+ value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+ } else {
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ }
+
+ value &= p_rmw_hdr->mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_poll *poll;
+ int i, err = 0;
+ unsigned long arg1, arg2;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, entry++)
+ qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+ delay, poll->mask,
+ poll->status);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ arg1 = entry->arg1;
+ arg2 = entry->arg2;
+ /* delay is known non-zero in this branch */
+ if (qlcnic_83xx_poll_reg(p_dev, arg1, delay,
+ poll->mask, poll->status)) {
+ QLCRD32(p_dev, arg1, &err);
+ if (err == -EIO)
+ return;
+ QLCRD32(p_dev, arg2, &err);
+ if (err == -EIO)
+ return;
+ }
+ }
+ }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ long delay;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+ entry->dr_value);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay)
+ qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status);
+ }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_rmw *rmw_hdr;
+
+ rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+ sizeof(struct qlc_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+ entry->arg2, rmw_hdr);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
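+/* Values read back here are stashed in ahw->reset.array so that later
+ * read-modify-write entries can reference them through index_a instead
+ * of re-reading hardware (see qlcnic_83xx_rmw_crb_reg()).
+ */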
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ int index, i, j, err;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+ unsigned long addr;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay) {
+ if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status)) {
+ index = p_dev->ahw->reset.array_index;
+ addr = entry->dr_addr;
+ j = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return;
+
+ p_dev->ahw->reset.array[index++] = j;
+
+ if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+ p_dev->ahw->reset.array_index = 1;
+ }
+ }
+ }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.template_end = 1;
+ if (p_dev->ahw->reset.seq_error == 0)
+ dev_err(&p_dev->pdev->dev,
+ "HW restart process completed successfully.\n");
+ else
+ dev_err(&p_dev->pdev->dev,
+ "HW restart completed with timeout errors.\n");
+}
+
+/**
+ * qlcnic_83xx_exec_template_cmd
+ *
+ * @p_dev: adapter structure
+ * @p_buff: pointer to the instruction template
+ *
+ * The template provides instructions to stop, restart and initialize
+ * firmware. These instructions are abstracted as a series of read, write
+ * and poll operations on hardware registers. Register information and
+ * operation specifics are not exposed to the driver; the driver reads the
+ * template from flash and executes the instructions located at pre-defined
+ * offsets.
+ *
+ * Returns: None
+ **/
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+ char *p_buff)
+{
+ int index, entries;
+ struct qlc_83xx_entry_hdr *p_hdr;
+ char *entry = p_buff;
+
+ p_dev->ahw->reset.seq_end = 0;
+ p_dev->ahw->reset.template_end = 0;
+ entries = p_dev->ahw->reset.hdr->entries;
+ index = p_dev->ahw->reset.seq_index;
+
+ for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+ switch (p_hdr->cmd) {
+ case QLC_83XX_OPCODE_NOP:
+ break;
+ case QLC_83XX_OPCODE_WRITE_LIST:
+ qlcnic_83xx_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_WRITE_LIST:
+ qlcnic_83xx_read_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_LIST:
+ qlcnic_83xx_poll_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+ qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+ qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_PAUSE:
+ qlcnic_83xx_pause(p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_END:
+ qlcnic_83xx_seq_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_TMPL_END:
+ qlcnic_83xx_template_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_POLL_READ_LIST:
+ qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+ break;
+ default:
+ dev_err(&p_dev->pdev->dev,
+ "%s: Unknown opcode 0x%04x in template %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+ entry += p_hdr->size;
+ }
+ p_dev->ahw->reset.seq_index = index;
+}
+
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_index = 0;
+
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+ if (p_dev->ahw->reset.template_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+/* POST FW related definitions */
+#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
+#define QLC_83XX_POST_MODE_REG 0x41602018
+#define QLC_83XX_POST_FAST_MODE 0
+#define QLC_83XX_POST_MEDIUM_MODE 1
+#define QLC_83XX_POST_SLOW_MODE 2
+
+/* POST Timeout values in milliseconds */
+#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690
+#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930
+#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500
+
+/* POST result values */
+#define QLC_83XX_POST_PASS 0xfffffff0
+#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff
+#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe
+#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc
+#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8
+
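+/* POST flow: stage the dedicated POST firmware image, clear the
+ * signature register, select the POST mode, boot from the staged file,
+ * then poll the signature register every 100 ms until it reports
+ * QLC_83XX_POST_PASS or the per-mode timeout (plus a 2 s cushion)
+ * expires.
+ */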
+static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ struct device *dev = &adapter->pdev->dev;
+ int timeout, count, ret = 0;
+ u32 signature;
+
+ /* Set timeout values with extra 2 seconds of buffer */
+ switch (adapter->ahw->post_mode) {
+ case QLC_83XX_POST_FAST_MODE:
+ timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000;
+ break;
+ case QLC_83XX_POST_MEDIUM_MODE:
+ timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000;
+ break;
+ case QLC_83XX_POST_SLOW_MODE:
+ timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+
+ ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
+ if (ret) {
+ dev_err(dev, "POST firmware can not be loaded, skipping POST\n");
+ return 0;
+ }
+
+ ret = qlcnic_83xx_copy_fw_file(adapter);
+ if (ret)
+ return ret;
+
+ /* Clear the QLC_83XX_POST_SIGNATURE_REG result register */
+ qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0);
+
+ /* Set POST mode */
+ qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG,
+ adapter->ahw->post_mode);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+
+ qlcnic_83xx_start_hw(adapter);
+
+ count = 0;
+ do {
+ msleep(100);
+ count += 100;
+
+ signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG);
+ if (signature == QLC_83XX_POST_PASS)
+ break;
+ } while (timeout > count);
+
+ if (timeout <= count) {
+ dev_err(dev, "POST timed out, signature = 0x%08x\n", signature);
+ return -EIO;
+ }
+
+ switch (signature) {
+ case QLC_83XX_POST_PASS:
+ dev_info(dev, "POST passed, Signature = 0x%08x\n", signature);
+ break;
+ case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_DDR_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : DDT TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_FLASH_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ default:
+ dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ int err = -EIO;
+
+ if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+ &(adapter->pdev->dev))) {
+ dev_err(&adapter->pdev->dev,
+ "No file FW image, loading flash FW image.\n");
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ } else {
+ if (qlcnic_83xx_copy_fw_file(adapter))
+ return err;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int err = -EIO;
+
+ qlcnic_83xx_stop_hw(adapter);
+
+ /* Collect FW register dump if required */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+ qlcnic_dump_fw(adapter);
+
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return err;
+ }
+
+ qlcnic_83xx_init_hw(adapter);
+
+ if (qlcnic_83xx_copy_bootloader(adapter))
+ return err;
+
+ /* Check if POST needs to be run */
+ if (adapter->ahw->run_post) {
+ err = qlcnic_83xx_run_post(adapter);
+ if (err)
+ return err;
+
+ /* No need to run POST in next reset sequence */
+ adapter->ahw->run_post = false;
+
+ /* Again reset the adapter to load regular firmware */
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_83xx_init_hw(adapter);
+
+ err = qlcnic_83xx_copy_bootloader(adapter);
+ if (err)
+ return err;
+ }
+
+ /* Boot either flash image or firmware image from host file system */
+ if (qlcnic_load_fw_file == 1) {
+ if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ return err;
+ } else {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ }
+
+ qlcnic_83xx_start_hw(adapter);
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_tx_ques = nic_info.max_tx_ques;
+ ahw->max_rx_ques = nic_info.max_rx_ques;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->max_mac_filters = nic_info.max_mac_filters;
+ ahw->max_mtu = nic_info.max_mtu;
+
+ /* eSwitch capability indicates vNIC mode.
+ * vNIC and SRIOV are mutually exclusive operational modes.
+ * If SR-IOV capability is detected, SR-IOV physical function
+ * will get initialized in default mode.
+ * SR-IOV virtual function initialization follows a
+ * different code path and opmode.
+ * SRIOV mode has precedence over vNIC mode.
+ */
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ return QLC_83XX_DEFAULT_OPMODE;
+
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ return QLCNIC_VNIC_MODE;
+
+ return QLC_83XX_DEFAULT_OPMODE;
+}
+
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 max_sds_rings, max_tx_rings;
+ int ret;
+
+ ret = qlcnic_83xx_get_nic_configuration(adapter);
+ if (ret == -EIO)
+ return -EIO;
+
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
+ if (qlcnic_83xx_config_vnic_opmode(adapter))
+ return -EIO;
+
+ max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+ } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ } else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
+ return -EIO;
+ }
+
+ adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+ adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
+ return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
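+/* A function is treated as "in use" when its bit is set in the driver
+ * presence mask or when bit 6 of the audit mask is set (the audit bit's
+ * exact meaning is not spelled out here).
+ */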
+#define IS_QLC_83XX_USED(a, b, c) (((1 << (a)->portnum) & (b)) || (((c) >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 presence_mask, audit_mask;
+ int status;
+
+ presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+ if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_STOP_NIC_FUNC);
+ if (status)
+ return;
+
+ cmd.req.arg[1] = BIT_31;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to clean up the function resources\n");
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlc_83xx_fw_info *fw_info;
+ int err = 0;
+
+ ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!ahw->fw_info) {
+ err = -ENOMEM;
+ } else {
+ fw_info = ahw->fw_info;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
+ strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+ u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (!adapter->ahw->msix_supported) {
+ rx_cnt = QLCNIC_SINGLE_RING;
+ tx_cnt = QLCNIC_SINGLE_RING;
+ }
+
+ /* Compute and set driver Tx and SDS ring counts */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
+
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
+
+ adapter->rx_mac_learn = false;
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ /* Check if POST needs to be run */
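+ /* qlcnic_load_fw_file doubles as a POST selector:
+ * 2 = fast, 3 = medium, 4 = slow; any other value skips POST.
+ */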
+ switch (qlcnic_load_fw_file) {
+ case 2:
+ ahw->post_mode = QLC_83XX_POST_FAST_MODE;
+ ahw->run_post = true;
+ break;
+ case 3:
+ ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE;
+ ahw->run_post = true;
+ break;
+ case 4:
+ ahw->post_mode = QLC_83XX_POST_SLOW_MODE;
+ ahw->run_post = true;
+ break;
+ default:
+ ahw->run_post = false;
+ break;
+ }
+
+ qlcnic_83xx_init_rings(adapter);
+
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+ qlcnic_83xx_read_flash_mfg_id(adapter)) {
+ dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+ err = -ENOTRECOVERABLE;
+ goto detach_mbx;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_get_fw_info(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ qlcnic_83xx_clear_function_resources(adapter);
+ qlcnic_dcb_enable(adapter->dcb);
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ qlcnic_dcb_get_info(adapter->dcb);
+
+ /* Configure default, SR-IOV or Virtual NIC mode of operation */
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ /* Perform operating mode specific initialization */
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ /* Periodically monitor device status */
+ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ ahw->mailbox = NULL;
+exit:
+ return err;
+}
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_initialize_nic(adapter, 0);
+
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int ret = 0;
+ u32 owner;
+
+ /* Mark the previous IDC state as NEED_RESET so
+ * that state_entry() will perform the reattachment
+ * and bring up the device
+ */
+ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ ret = qlcnic_83xx_restart_hw(adapter);
+ if (ret < 0)
+ return ret;
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ }
+
+ ret = idc->state_entry(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 owner;
+
+ idc->prev_state = QLC_83XX_IDC_DEV_READY;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644
index 000000000..be7d7a62c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -0,0 +1,284 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int ret = -EBUSY;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return ret;
+
+ id = ahw->pci_func;
+ data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) |
+ QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id);
+
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ * The management virtual NIC sets the operational mode of the other vNICs
+ * and configures the embedded switch (ESWITCH).
+ * Returns: Success(0) or error code.
+ *
+ **/
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_npar_info *npar;
+ int i, err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+
+ if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+ if (qlcnic_init_pci_info(adapter))
+ return err;
+
+ npar = adapter->npars;
+
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
+ dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+ npar->pci_func, npar->active, npar->type,
+ npar->phy_port, npar->min_bw, npar->max_bw,
+ npar->mac);
+ }
+
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
+ ahw->max_pci_func, ahw->total_nic_func);
+
+ if (qlcnic_83xx_set_vnic_opmode(adapter))
+ return err;
+
+ if (qlcnic_set_default_offload_settings(adapter))
+ return err;
+ } else {
+ if (qlcnic_reset_npar_config(adapter))
+ return err;
+ }
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+ dev_info(dev, "HAL Version: %d, Management function\n",
+ ahw->fw_hal_version);
+
+ return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_fw_version(adapter);
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_config_vnic_opmode
+ *
+ * @adapter: adapter structure
+ * Identify virtual NIC operational modes.
+ *
+ * Returns: Success(0) or error code.
+ *
+ **/
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+ switch (priv_level) {
+ case QLCNIC_NON_PRIV_FUNC:
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ break;
+ case QLCNIC_PRIV_FUNC:
+ ahw->op_mode = QLCNIC_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ break;
+ case QLCNIC_MGMT_FUNC:
+ ahw->op_mode = QLCNIC_MGMT_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+ return -EIO;
+ }
+
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ if (adapter->drv_mac_learn)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ adapter->rx_mac_learn = false;
+ }
+
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+ ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+ return 0;
+}
+
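+/* Poll the vNIC state register once per second until it reports
+ * QLCNIC_DEV_NPAR_OPER or idc.vnic_wait_limit iterations elapse.
+ */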
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 state;
+
+ state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+ while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+ msleep(1000);
+ state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+ }
+
+ if (!idc->vnic_wait_limit) {
+ dev_err(&adapter->pdev->dev,
+ "vNIC mode not operational, state check timed out.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ return err;
+
+ if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ *port_id = nic_info.phys_port;
+ else
+ err = -EIO;
+
+ if (!err)
+ adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
+
+ return err;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000..6e6f18fc5
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,1428 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+
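+/* Each entry is {command opcode, request arg count, response arg count};
+ * qlcnic_82xx_alloc_mbx_args() uses it to size the mailbox argument
+ * arrays.
+ */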
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+ {QLCNIC_CMD_SET_MTU, 4, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+ {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+ {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
+ {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
+};
+
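+/* Command signature layout: bits 7:0 carry the PCI function, bits 15:8
+ * the FW HAL version, and bits 31:16 the 0xcafe magic.
+ */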
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
+{
+ return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+ (0xcafe << 16);
+}
+
+/* Allocate mailbox registers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ mbx->req.arg[0] = type;
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Free up mailbox registers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+ kfree(cmd->req.arg);
+ cmd->req.arg = NULL;
+ kfree(cmd->rsp.arg);
+ cmd->rsp.arg = NULL;
+}
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0, err = 0;
+
+ do {
+ /* give at least 1 ms for firmware to respond */
+ mdelay(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
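+/* CDRP handshake: take the API lock, write the signature and request
+ * args into CRB, post the command, poll for a response code, then read
+ * back the response args and translate any failure code.
+ */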
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i, err = 0;
+ u32 rsp;
+ u32 signature;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ const char *fmt;
+
+ signature = qlcnic_get_cmd_signature(ahw);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter)) {
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ return cmd->rsp.arg[0];
+ }
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ for (i = 1; i < cmd->req.num; i++)
+ QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
+ QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
+ switch (cmd->rsp.arg[0]) {
+ case QLCNIC_RCODE_INVALID_ARGS:
+ fmt = "CDRP invalid args: [%d]\n";
+ break;
+ case QLCNIC_RCODE_NOT_SUPPORTED:
+ case QLCNIC_RCODE_NOT_IMPL:
+ fmt = "CDRP command not supported: [%d]\n";
+ break;
+ case QLCNIC_RCODE_NOT_PERMITTED:
+ fmt = "CDRP requested action not permitted: [%d]\n";
+ break;
+ case QLCNIC_RCODE_INVALID:
+ fmt = "CDRP invalid or unknown cmd received: [%d]\n";
+ break;
+ case QLCNIC_RCODE_TIMEOUT:
+ fmt = "CDRP command timeout: [%d]\n";
+ break;
+ default:
+ fmt = "CDRP command failed: [%d]\n";
+ break;
+ }
+ dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK)
+ cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+ for (i = 1; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+ return cmd->rsp.arg[0];
+}
+
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 arg1, arg2, arg3;
+ char drv_string[12];
+ int err = 0;
+
+ memset(drv_string, 0, sizeof(drv_string));
+ snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
+ _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+ _QLCNIC_LINUX_SUBVERSION);
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
+ if (err)
+ return err;
+
+ memcpy(&arg1, drv_string, sizeof(u32));
+ memcpy(&arg2, drv_string + 4, sizeof(u32));
+ memcpy(&arg3, drv_string + 8, sizeof(u32));
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ cmd.req.arg[3] = arg3;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+ return err;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = recv_ctx->context_id;
+ cmd.req.arg[2] = mtu;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
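+/*
+ * Describe every RDS (receive) and SDS (status) ring in a DMA-coherent
+ * host request, hand its address to the firmware, then parse the card
+ * response for each ring's producer/consumer CRB offset and the new
+ * context id.
+ */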
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ u8 i, nrds_rings, nsds_rings;
+ struct qlcnic_cmd_args cmd;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
+ int err;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->drv_sds_rings;
+
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = addr;
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+ | QLCNIC_CAP0_VALIDOFF);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = 0;
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+ rds_ring->producer = 0;
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+ sds_ring->consumer = 0;
+ memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+ if (err)
+ goto out_free_rsp;
+
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create rx ctx in firmware%d\n", err);
+ goto out_free_rsp;
+ }
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
+ return err;
+}
+
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+ if (err)
+ return;
+
+ cmd.req.arg[1] = recv_ctx->context_id;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
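+/*
+ * Same request/response scheme as the rx context: the host request
+ * describes one CDS (command descriptor) ring plus the consumer-index
+ * DMA address, and the response carries the producer CRB offset and
+ * the tx context id.
+ */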
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int ring)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ struct qlcnic_cmd_args cmd;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
+
+ /* reset host resources */
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+ *(tx_ring->hw_consumer) = 0;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ prq = rq_addr;
+ prsp = rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->drv_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
+
+ prq->interrupt_ctl = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ if (err)
+ goto out_free_rsp;
+
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
+ tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->drv_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
+ } else {
+ netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+ if (ret)
+ return;
+
+ cmd.req.arg[1] = tx_ring->ctx_id;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int
+qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err, ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ __le32 *ptr;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr,
+ GFP_KERNEL);
+ if (ptr == NULL)
+ return -ENOMEM;
+
+ tx_ring->hw_consumer = ptr;
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr,
+ GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = addr;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = addr;
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = addr;
+ }
+
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
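+/*
+ * Create the rx context first, then one tx context per tx ring; if any
+ * tx context fails, every context created so far is deleted again so
+ * the firmware is never left half-attached.
+ */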
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
+{
+ int i, err, ring;
+
+ if (dev->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(dev->pdev);
+ dev->flags &= ~QLCNIC_NEED_FLR;
+ }
+
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_config_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+ }
+
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+
+ err = qlcnic_fw_cmd_create_rx_ctx(dev);
+ if (err)
+ goto err_out;
+
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
+ err = qlcnic_fw_cmd_create_tx_ctx(dev,
+ &dev->tx_ring[ring],
+ ring);
+ if (err) {
+ qlcnic_fw_cmd_del_rx_ctx(dev);
+ if (ring == 0)
+ goto err_out;
+
+ for (i = 0; i < ring; i++)
+ qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
+
+ goto err_out;
+ }
+ }
+
+ set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
+ return 0;
+
+err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(dev, 0);
+ }
+
+ return err;
+}
+
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
+ int ring;
+
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_del_rx_ctx(adapter);
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+ qlcnic_fw_cmd_del_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
+
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(adapter, 0);
+ }
+ /* Allow dma queues to drain after context reset */
+ mdelay(20);
+ }
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head,
+ tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
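+/*
+ * Add or delete one interrupt mapping per MSI-X vector. The request
+ * word packs the operation in bits 0-3, the interrupt type in bits 4-7
+ * and, for MSI-X, the vector id in the upper half; the response
+ * returns the firmware-assigned id and interrupt source offset.
+ */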
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ if (err)
+ return err;
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
+
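+/*
+ * The MAC address is returned in two 32-bit words, most significant
+ * byte first: e.g. mac_high 0x00000102 with mac_low 0x03040506
+ * unpacks to 01:02:03:04:05:06.
+ */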
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = function | BIT_8;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get mac address%d\n", err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ dma_addr_t nic_dma_t;
+ const struct qlcnic_info_le *nic_info;
+ void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
+ size_t nic_size = sizeof(struct qlcnic_info_le);
+
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
+ if (!nic_info_addr)
+ return -ENOMEM;
+
+ nic_info = nic_info_addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = (func_id << 16 | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ } else {
+ npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
+ npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+ npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+ npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+ npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+ npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+ npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+ npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
+
+ return err;
+}
+
+/* Configure a NIC partition */
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int err = -EIO;
+ dma_addr_t nic_dma_t;
+ void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_info_le *nic_info;
+ size_t nic_size = sizeof(struct qlcnic_info_le);
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
+ if (!nic_info_addr)
+ return -ENOMEM;
+
+ nic_info = nic_info_addr;
+
+ nic_info->pci_func = cpu_to_le16(nic->pci_func);
+ nic_info->op_mode = cpu_to_le16(nic->op_mode);
+ nic_info->phys_port = cpu_to_le16(nic->phys_port);
+ nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+ nic_info->capabilities = cpu_to_le32(nic->capabilities);
+ nic_info->max_mac_filters = nic->max_mac_filters;
+ nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+ nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+ nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+ nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set nic info%d\n", err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
+
+ return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t pci_info_dma_t;
+ void *pci_info_addr;
+ int err = 0, i;
+
+ pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
+ if (!pci_info_addr)
+ return -ENOMEM;
+
+ npar = pci_info_addr;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(pci_info_dma_t);
+ cmd.req.arg[2] = LSD(pci_info_dma_t);
+ cmd.req.arg[3] = pci_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ ahw->total_nic_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
+ pci_info->id = le16_to_cpu(npar->id);
+ pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
+ pci_info->type = le16_to_cpu(npar->type);
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
+ pci_info->default_port =
+ le16_to_cpu(npar->default_port);
+ pci_info->tx_min_bw =
+ le16_to_cpu(npar->tx_min_bw);
+ pci_info->tx_max_bw =
+ le16_to_cpu(npar->tx_max_bw);
+ memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+ }
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PCI Info%d\n", err);
+ err = -EIO;
+ }
+
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
+ pci_info_dma_t);
+
+ return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+ u8 enable_mirroring, u8 pci_func)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+ u32 arg1;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
+ return err;
+ }
+
+ arg1 = id | (enable_mirroring ? BIT_4 : 0);
+ arg1 |= pci_func << 8;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_PORTMIRRORING);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
+ pci_func, id);
+ else
+ dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
+ pci_func, id);
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
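+/*
+ * Request word layout: function id in bits 0-7, stats version in bits
+ * 8-11, query type in bits 12-14, rx/tx selector in bit 15 and the
+ * expected buffer size in the upper half.
+ */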
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
+ struct qlcnic_esw_stats_le *stats;
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ u32 arg1;
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+
+ if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
+ (func != adapter->ahw->pci_func)) {
+ dev_err(&adapter->pdev->dev,
+ "Not privilege to query stats for func=%d", func);
+ return -EIO;
+ }
+
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr)
+ return -ENOMEM;
+
+ arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+ arg1 |= rx_tx << 15 | stats_size << 16;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_STATS);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (!err) {
+ stats = stats_addr;
+ esw_stats->context_id = le16_to_cpu(stats->context_id);
+ esw_stats->version = le16_to_cpu(stats->version);
+ esw_stats->size = le16_to_cpu(stats->size);
+ esw_stats->multicast_frames =
+ le64_to_cpu(stats->multicast_frames);
+ esw_stats->broadcast_frames =
+ le64_to_cpu(stats->broadcast_frames);
+ esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+ esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+ esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+ esw_stats->errors = le64_to_cpu(stats->errors);
+ esw_stats->numbytes = le64_to_cpu(stats->numbytes);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+ stats_dma_t);
+
+ return err;
+}
+
+/* This routine will retrieve the MAC statistics from firmware */
+int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_mac_statistics *mac_stats)
+{
+ struct qlcnic_mac_statistics_le *stats;
+ struct qlcnic_cmd_args cmd;
+ size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ int err;
+
+ if (mac_stats == NULL)
+ return -ENOMEM;
+
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = stats_size << 16;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ stats = stats_addr;
+ mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
+ mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
+ mac_stats->mac_tx_mcast_pkts =
+ le64_to_cpu(stats->mac_tx_mcast_pkts);
+ mac_stats->mac_tx_bcast_pkts =
+ le64_to_cpu(stats->mac_tx_bcast_pkts);
+ mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
+ mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
+ mac_stats->mac_rx_mcast_pkts =
+ le64_to_cpu(stats->mac_rx_mcast_pkts);
+ mac_stats->mac_rx_length_error =
+ le64_to_cpu(stats->mac_rx_length_error);
+ mac_stats->mac_rx_length_small =
+ le64_to_cpu(stats->mac_rx_length_small);
+ mac_stats->mac_rx_length_large =
+ le64_to_cpu(stats->mac_rx_length_large);
+ mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
+ mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
+ mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get mac stats failed, err=%d.\n", __func__, err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+ stats_dma_t);
+
+ return err;
+}
+
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ struct __qlcnic_esw_statistics port_stats;
+ u8 i;
+ int ret = -EIO;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+ if (adapter->npars == NULL)
+ return -EIO;
+
+ memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->context_id = eswitch;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].phy_port != eswitch)
+ continue;
+
+ memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
+ rx_tx, &port_stats))
+ continue;
+
+ esw_stats->size = port_stats.size;
+ esw_stats->version = port_stats.version;
+ QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
+ port_stats.unicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
+ port_stats.multicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
+ port_stats.broadcast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
+ port_stats.dropped_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->errors,
+ port_stats.errors);
+ QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
+ port_stats.local_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
+ port_stats.numbytes);
+ ret = 0;
+ }
+ return ret;
+}
+
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+ const u8 port, const u8 rx_tx)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u32 arg1;
+
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+
+ if (func_esw == QLCNIC_STATS_PORT) {
+ if (port >= ahw->max_vnic_func)
+ goto err_ret;
+ } else if (func_esw == QLCNIC_STATS_ESWITCH) {
+ if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+ goto err_ret;
+ } else {
+ goto err_ret;
+ }
+
+ if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+ goto err_ret;
+
+ arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+ arg1 |= BIT_14 | rx_tx << 15;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_STATS);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+
+err_ret:
+ dev_err(&adapter->pdev->dev,
+ "Invalid args func_esw %d port %d rx_ctx %d\n",
+ func_esw, port, rx_tx);
+ return -EIO;
+}
+
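+/* Read back the current port config so callers can read-modify-write it */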
+static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ u32 *arg1, u32 *arg2)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ u8 pci_func = *arg1 >> 8;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = *arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg[1];
+ *arg2 = cmd.rsp.arg[2];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS)
+ dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
+ pci_func);
+ else
+ dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
+ pci_func);
+ return err;
+}
+
+/*
+ * Configure eSwitch port
+ * op_mode = 0 for setting default port behavior
+ * op_mode = 1 for setting vlan id
+ * op_mode = 2 for deleting vlan id
+ * op_type = 0 for vlan_id
+ * op_type = 1 for port vlan_id
+ */
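+/*
+ * As built below, arg1 carries the function id in bits 8-15, the vlan
+ * add/delete selectors in BIT_2/BIT_3, the port flags in BIT_4-BIT_7
+ * and the vlan id in bits 16-31; arg2 carries mac-anti-spoof in BIT_0
+ * and the offload flags in BIT_1-BIT_3.
+ */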
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO, index;
+ u32 arg1, arg2 = 0;
+ u8 pci_func;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
+ return err;
+ }
+
+ pci_func = esw_cfg->pci_func;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return err;
+ arg1 = (adapter->npars[index].phy_port & BIT_0);
+ arg1 |= (pci_func << 8);
+
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return err;
+ arg1 &= ~(0x0ff << 8);
+ arg1 |= (pci_func << 8);
+ arg1 &= ~(BIT_2 | BIT_3);
+ switch (esw_cfg->op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ arg1 |= (BIT_4 | BIT_6 | BIT_7);
+ arg2 |= (BIT_0 | BIT_1);
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ arg2 |= (BIT_2 | BIT_3);
+ if (!(esw_cfg->discard_tagged))
+ arg1 &= ~BIT_4;
+ if (!(esw_cfg->promisc_mode))
+ arg1 &= ~BIT_6;
+ if (!(esw_cfg->mac_override))
+ arg1 &= ~BIT_7;
+ if (!(esw_cfg->mac_anti_spoof))
+ arg2 &= ~BIT_0;
+ if (!(esw_cfg->offload_flags & BIT_0))
+ arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
+ if (!(esw_cfg->offload_flags & BIT_1))
+ arg2 &= ~BIT_2;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ arg2 &= ~BIT_3;
+ break;
+ case QLCNIC_ADD_VLAN:
+ arg1 &= ~(0x0ffff << 16);
+ arg1 |= (BIT_2 | BIT_5);
+ arg1 |= (esw_cfg->vlan_id << 16);
+ break;
+ case QLCNIC_DEL_VLAN:
+ arg1 |= (BIT_3 | BIT_5);
+ arg1 &= ~(0x0ffff << 16);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
+ return err;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_ESWITCH);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
+ pci_func);
+ else
+ dev_info(dev, "Configured eSwitch for vNIC function %d\n",
+ pci_func);
+
+ return err;
+}
+
+int
+qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ u32 arg1, arg2;
+ int index;
+ u8 phy_port;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
+ if (index < 0)
+ return -EIO;
+ phy_port = adapter->npars[index].phy_port;
+ } else {
+ phy_port = adapter->ahw->physical_port;
+ }
+ arg1 = phy_port;
+ arg1 |= (esw_cfg->pci_func << 8);
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return -EIO;
+
+ esw_cfg->discard_tagged = !!(arg1 & BIT_4);
+ esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
+ esw_cfg->promisc_mode = !!(arg1 & BIT_6);
+ esw_cfg->mac_override = !!(arg1 & BIT_7);
+ esw_cfg->vlan_id = LSW(arg1 >> 16);
+ esw_cfg->mac_anti_spoof = (arg2 & 0x1);
+ esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
+
+ return 0;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 000000000..a72bcddf1
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1147 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM 3
+#define QLC_DCB_LOCAL_IDX 0
+#define QLC_DCB_OPER_IDX 1
+#define QLC_DCB_PEER_IDX 2
+
+#define QLC_DCB_GET_MAP(V) (1 << V)
+
+#define QLC_DCB_FW_VER 0x2
+#define QLC_DCB_MAX_TC 0x8
+#define QLC_DCB_MAX_APP 0x8
+#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID 0x3
+#define QLC_DCB_OPER_PARAM_FWID 0x1
+#define QLC_DCB_PEER_PARAM_FWID 0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+struct qlcnic_dcb_capability {
+ bool tsa_capability;
+ bool ets_capability;
+ u8 max_num_tc;
+ u8 max_ets_tc;
+ u8 max_pfc_tc;
+ u8 dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+ u32 hdr_prio_pfc_map[2];
+ u32 prio_pg_map[2];
+ u32 pg_bw_map[2];
+ u32 pg_tsa_map[2];
+ u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+ /* 1st local, 2nd operational, 3rd remote */
+ struct qlcnic_dcb_param type[3];
+ u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+ __le32 hdr_prio_pfc_map[2];
+ __le32 prio_pg_map[2];
+ __le32 pg_bw_map[2];
+ __le32 pg_tsa_map[2];
+ __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+ QLC_SELECTOR_DEF = 0x0,
+ QLC_SELECTOR_ETHER,
+ QLC_SELECTOR_TCP,
+ QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+ QLC_PRIO_NONE = 0,
+ QLC_PRIO_GROUP,
+ QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+ QLC_PFC_DISABLED = 0,
+ QLC_PFC_FULL,
+ QLC_PFC_TX,
+ QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+ bool valid;
+ enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+ bool valid;
+ u8 total_bw_percent; /* of link/port BW */
+ u8 prio_count;
+ u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+ bool valid;
+ struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+ enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
+ u8 link_percent; /* % of link bandwidth */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 up_tc_map;
+ u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+ bool valid;
+ enum qlcnic_dcb_selector selector;
+ u16 protocol;
+ u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+ struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+ struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+ struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+ bool tc_param_valid;
+ bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+ /* 0 - local, 1 - operational, 2 - remote */
+ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+ struct qlcnic_dcb_capability capability;
+ u32 version;
+};
+
+static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
+ .aen_handler = qlcnic_83xx_dcb_aen_handler,
+};
+
+static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
+ .aen_handler = qlcnic_82xx_dcb_aen_handler,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_NUMAPP(val);
+ else
+ return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_PFC_VALID(val);
+ else
+ return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_TSA_VALID(val);
+ else
+ return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+ else
+ return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
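+/* Return the index of the lowest priority bit set in up_tc_map */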
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+ int j;
+
+ for (j = 0; j < QLC_DCB_MAX_TC; j++)
+ if (up_tc_map & QLC_DCB_GET_MAP(j))
+ break;
+
+ return j;
+}
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+ else if (qlcnic_83xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+ if (!dcb)
+ return -ENOMEM;
+
+ adapter->dcb = dcb;
+ dcb->adapter = adapter;
+ qlcnic_set_dcb_ops(adapter);
+ dcb->state = 0;
+
+ return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_adapter *adapter;
+
+ if (!dcb)
+ return;
+
+ adapter = dcb->adapter;
+
+ while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&dcb->aen_work);
+
+ if (dcb->wq) {
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+ }
+
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+ kfree(dcb->param);
+ dcb->param = NULL;
+ kfree(dcb);
+ adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ qlcnic_dcb_get_hw_capability(dcb);
+ qlcnic_dcb_get_cee_cfg(dcb);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ int err = 0;
+
+ INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+ dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+ if (!dcb->wq) {
+ dev_err(&dcb->adapter->pdev->dev,
+ "DCB workqueue allocation failed. DCB will be disabled\n");
+ return -1;
+ }
+
+ dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+ if (!dcb->cfg) {
+ err = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+ if (!dcb->param) {
+ err = -ENOMEM;
+ goto out_free_cfg;
+ }
+
+ return 0;
+out_free_cfg:
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+
+out_free_wq:
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+
+ return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_out;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX capability, err %d\n", err);
+ } else {
+ mbx_out = cmd.rsp.arg[1];
+ if (buf)
+ memcpy(buf, &mbx_out, sizeof(u32));
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
+{
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+ err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
+ if (err)
+ return err;
+
+ mbx_out = *val;
+ if (QLC_DCB_TSA_SUPPORT(mbx_out))
+ cap->tsa_capability = true;
+
+ if (QLC_DCB_ETS_SUPPORT(mbx_out))
+ cap->ets_capability = true;
+
+ cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+ cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+ cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+ if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+ cap->max_ets_tc > cap->max_num_tc ||
+ cap->max_pfc_tc > cap->max_num_tc) {
+ dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_cfg *cfg = dcb->cfg;
+ struct qlcnic_dcb_capability *cap;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+ if (err)
+ return err;
+
+ cap = &cfg->capability;
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ return err;
+}
+
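+/*
+ * Query one parameter block (local/operational/peer) into a
+ * DMA-coherent buffer and convert the little-endian response into a
+ * host-order struct qlcnic_dcb_param returned through buf.
+ */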
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+ char *buf, u8 type)
+{
+ u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t cardrsp_phys_addr;
+ struct qlcnic_dcb_param rsp;
+ struct qlcnic_cmd_args cmd;
+ u64 phys_addr;
+ void *addr;
+ int err, i;
+
+ switch (type) {
+ case QLC_DCB_LOCAL_PARAM_FWID:
+ case QLC_DCB_OPER_PARAM_FWID:
+ case QLC_DCB_PEER_PARAM_FWID:
+ break;
+ default:
+ dev_err(dev, "Invalid parameter type %d\n", type);
+ return -EINVAL;
+ }
+
+ addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+
+ prsp_le = addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ goto out_free_rsp;
+
+ phys_addr = cardrsp_phys_addr;
+ cmd.req.arg[1] = size | (type << 16);
+ cmd.req.arg[2] = MSD(phys_addr);
+ cmd.req.arg[3] = LSD(phys_addr);
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+ goto out;
+ }
+
+ memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+ rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+ rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+ rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+ rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+ rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+ rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+ rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+ rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++)
+ rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+ if (buf)
+ memcpy(buf, &rsp, size);
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_mbx_params *mbx;
+ int err;
+
+ mbx = dcb->param;
+ if (!mbx)
+ return 0;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
+ QLC_DCB_LOCAL_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
+ QLC_DCB_OPER_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
+ QLC_DCB_PEER_PARAM_FWID);
+ if (err)
+ return err;
+
+ mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+ return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+ struct qlcnic_dcb *dcb;
+
+ dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+
+ qlcnic_dcb_get_cee_cfg(dcb);
+ clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
+}
+
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ return;
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+ if (err)
+ return err;
+
+ if (mbx_out & BIT_2)
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+ if (mbx_out & BIT_3)
+ cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+ if (cap->dcb_capability)
+ cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ return err;
+}
+
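+/*
+ * On 83xx the whole CEE table arrives in the mailbox response itself:
+ * rsp.arg[1] is the prio->tc map and the three parameter blocks occupy
+ * 16 dwords each, starting at rsp.arg[2], rsp.arg[18] and rsp.arg[34].
+ */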
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+ char *buf, u8 idx)
+{
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_dcb_mbx_params mbx_out;
+ int err, i, j, k, max_app, size;
+ struct qlcnic_dcb_param *each;
+ struct qlcnic_cmd_args cmd;
+ u32 val;
+ char *p;
+
+ size = 0;
+ memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+ memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ return err;
+
+ cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX param, err %d\n", err);
+ goto out;
+ }
+
+ mbx_out.prio_tc_map = cmd.rsp.arg[1];
+ p = memcpy(buf, &mbx_out, sizeof(u32));
+ k = 2;
+ p += sizeof(u32);
+
+ for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+ each = &mbx_out.type[j];
+
+ each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+ each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+ each->prio_pg_map[0] = cmd.rsp.arg[k++];
+ each->prio_pg_map[1] = cmd.rsp.arg[k++];
+ each->pg_bw_map[0] = cmd.rsp.arg[k++];
+ each->pg_bw_map[1] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+ val = each->hdr_prio_pfc_map[0];
+
+ max_app = qlcnic_dcb_get_num_app(adapter, val);
+ for (i = 0; i < max_app; i++)
+ each->app[i] = cmd.rsp.arg[i + k];
+
+ size = 16 * sizeof(u32);
+ memcpy(p, &each->hdr_prio_pfc_map[0], size);
+ p += size;
+ if (j == 0)
+ k = 18;
+ else
+ k = 34;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ int err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
+ if (err)
+ return err;
+
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+ return err;
+}
+
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+ u32 *val = data;
+
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ return;
+
+ if (*val & BIT_8)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+ else
+ clear_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 i, tc, pgid;
+
+ for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+ tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+ tc_cfg = &type->tc_cfg[tc];
+ tc_cfg->valid = true;
+ tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+ if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+ type->pfc_mode_enable) {
+ tc_cfg->prio_cfg[i].valid = true;
+ tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+ }
+
+ if (i < 4)
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+ else
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+ tc_cfg->pgid = pgid;
+
+ tc_cfg->prio_type = QLC_PRIO_LINK;
+ type->pg_cfg[tc_cfg->pgid].prio_count++;
+ }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_pg_cfg *pg_cfg;
+ u8 i, tsa, bw_per;
+
+ for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+ pg_cfg = &type->pg_cfg[i];
+ pg_cfg->valid = true;
+
+ if (i < 4) {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+ } else {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+ }
+
+ pg_cfg->total_bw_percent = bw_per;
+ pg_cfg->tsa_type = tsa;
+ }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_app *app;
+ u8 i, num_app, map, cnt;
+ struct dcb_app new_app;
+
+ num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+ for (i = 0; i < num_app; i++) {
+ app = &type->app[i];
+ app->valid = true;
+
+ /* CEE only: firmware selector values are offset by 1 from dcbnl's */
+ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+ new_app.selector = app->selector;
+ app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+ new_app.protocol = app->protocol;
+ map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+ cnt = qlcnic_dcb_prio_count(map);
+
+ if (cnt >= QLC_DCB_MAX_TC)
+ cnt = 0;
+
+ app->priority = cnt;
+ new_app.priority = cnt;
+
+ if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+ dcb_setapp(adapter->netdev, &new_app);
+ }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+ struct qlcnic_dcb_param *each = &mbx->type[idx];
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+ type->tc_param_valid = false;
+ type->pfc_mode_enable = false;
+ memset(type->tc_cfg, 0,
+ sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+ memset(type->pg_cfg, 0,
+ sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+ if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_pfc_tc)
+ type->pfc_mode_enable = true;
+
+ if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_ets_tc)
+ type->tc_param_valid = true;
+
+ qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+ qlcnic_dcb_fill_cee_pg_params(each, type);
+ qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+ qlcnic_dcb_map_cee_params(adapter, i);
+
+ dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+ memcpy(addr, netdev->perm_addr, netdev->addr_len);
+}
+
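+/*
+ * dcbnl callback: bandwidth within a PG is reported as an equal share
+ * (100 / count) among all TCs mapped to that PG.
+ */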
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+ u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+ struct qlcnic_dcb_cee *type;
+ u8 i, cnt, pg;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
+ return;
+
+ tc_cfg = &type->tc_cfg[tc];
+ if (!tc_cfg->valid)
+ return;
+
+ *pgid = tc_cfg->pgid;
+ *prio = tc_cfg->prio_type;
+ *up_tc_map = tc_cfg->up_tc_map;
+ pg = *pgid;
+
+ for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+ temp = &type->tc_cfg[i];
+ if (temp->valid && (pg == temp->pgid))
+ cnt++;
+ }
+
+ tc_cfg->bwg_percent = (100 / cnt);
+ *bw_per = tc_cfg->bwg_percent;
+}
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_pg_cfg *pgcfg;
+ struct qlcnic_dcb_cee *type;
+
+ *bw_pct = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
+ return;
+
+ pgcfg = &type->pg_cfg[pgid];
+ if (!pgcfg->valid)
+ return;
+
+ *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 val = QLC_DCB_GET_MAP(prio);
+ struct qlcnic_dcb_cee *type;
+ u8 i;
+
+ *setting = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->pfc_mode_enable)
+ return;
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc_cfg = &type->tc_cfg[i];
+ if (!tc_cfg->valid)
+ continue;
+
+ if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+ *setting = tc_cfg->prio_cfg[prio].pfc_type;
+ }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+ u8 *cap)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+		*cap = 0x80; /* 8 traffic classes supported */
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcb->cfg->capability.dcb_capability;
+ break;
+ default:
+ *cap = false;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return -EINVAL;
+
+ switch (attr) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = cfg->capability.max_ets_tc;
+ return 0;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = cfg->capability.max_pfc_tc;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ return 0;
+
+ return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *type;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 1;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *flag = 0;
+
+ switch (fid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (type->tc_param_valid)
+ *flag |= DCB_FEATCFG_ENABLE;
+ else
+ *flag |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (type->pfc_mode_enable) {
+ if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+ *flag |= DCB_FEATCFG_ENABLE;
+ } else {
+ *flag |= DCB_FEATCFG_ERROR;
+ }
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ *flag |= DCB_FEATCFG_ENABLE;
+ break;
+ default:
+ netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ *bw_pct = 0;
+}
+
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
+ memset(info, 0, sizeof(*info));
+ *app_count = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+ if (peer->app[i].valid)
+ (*app_count)++;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ struct qlcnic_dcb_app *app;
+ int i, j;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+ app = &peer->app[i];
+ if (!app->valid)
+ continue;
+
+ table[j].selector = app->selector;
+ table[j].priority = app->priority;
+ table[j++].protocol = app->protocol;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ u8 i, j, k, map;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+ if (!peer->pg_cfg[i].valid)
+ continue;
+
+ pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+ for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+			if (peer->tc_cfg[k].valid &&
+			    (peer->tc_cfg[k].pgid == i)) {
+				map = peer->tc_cfg[k].up_tc_map;
+ pg->prio_pg[j++] = map;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_tc_cfg *tc;
+ struct qlcnic_dcb_cee *peer;
+ u8 i, setting, prio;
+
+ pfc->pfc_en = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc = &peer->tc_cfg[i];
+ prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+ setting = 0;
+ qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+ if (setting)
+ pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+ }
+
+ pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+ .getstate = qlcnic_dcb_get_state,
+ .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
+ .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
+ .getpfccfg = qlcnic_dcb_get_pfc_cfg,
+ .getcap = qlcnic_dcb_get_capability,
+ .getnumtcs = qlcnic_dcb_get_num_tcs,
+ .getapp = qlcnic_dcb_get_app,
+ .getpfcstate = qlcnic_dcb_get_pfc_state,
+ .getdcbx = qlcnic_dcb_get_dcbx,
+ .getfeatcfg = qlcnic_dcb_get_feat_cfg,
+
+ .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+ .peer_getappinfo = qlcnic_dcb_peer_app_info,
+ .peer_getapptable = qlcnic_dcb_peer_app_table,
+ .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
+ .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
+};
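+
+/*
+ * Note that this dcbnl table only wires up "get" handlers: the CEE
+ * state is negotiated by the firmware, so userspace (e.g. lldpad's
+ * dcbtool, command shown as a hypothetical example:
+ * "dcbtool gc eth0 dcb") can observe but not modify it through dcbnl.
+ */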
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 000000000..3cf4a10fb
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,122 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+#define QLCNIC_DCB_STATE 0
+#define QLCNIC_DCB_AEN_MODE 1
+
+#ifdef CONFIG_QLCNIC_DCB
+int qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+struct qlcnic_dcb;
+
+struct qlcnic_dcb_ops {
+ int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+ int (*get_hw_capability) (struct qlcnic_dcb *);
+ int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+ void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+ void (*aen_handler) (struct qlcnic_dcb *, void *);
+ int (*get_cee_cfg) (struct qlcnic_dcb *);
+ void (*get_info) (struct qlcnic_dcb *);
+ int (*attach) (struct qlcnic_dcb *);
+ void (*free) (struct qlcnic_dcb *);
+};
+
+struct qlcnic_dcb {
+ struct qlcnic_dcb_mbx_params *param;
+ struct qlcnic_adapter *adapter;
+ struct delayed_work aen_work;
+ struct workqueue_struct *wq;
+ struct qlcnic_dcb_ops *ops;
+ struct qlcnic_dcb_cfg *cfg;
+ unsigned long state;
+};
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+ kfree(dcb);
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(dcb);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(dcb, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(dcb, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+ if (dcb && dcb->ops->aen_handler)
+ dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(dcb);
+}
+
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
+}
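+
+/*
+ * Usage sketch (hypothetical caller, not part of this header): each
+ * wrapper above tolerates a NULL dcb pointer and a missing op, so DCB
+ * support stays optional at every call site, e.g.:
+ *
+ *	if (qlcnic_dcb_attach(adapter->dcb))
+ *		qlcnic_clear_dcb_ops(adapter->dcb);
+ *	else
+ *		qlcnic_dcb_get_info(adapter->dcb);
+ */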
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000..494e8105a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1882 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+ 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+ QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+ QLC_OFF(stats.xmitfinished)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+ QLC_OFF(stats.encap_lso_frames)},
+ {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+ QLC_OFF(stats.encap_tx_csummed)},
+ {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+ QLC_OFF(stats.encap_rx_csummed)},
+ {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+ QLC_OFF(stats.mac_filter_limit_overrun)},
+ {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ QLC_OFF(stats.spurious_intr)},
+};
+
+static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx unicast frames",
+ "tx multicast frames",
+ "tx broadcast frames",
+ "tx dropped frames",
+ "tx errors",
+ "tx local frames",
+ "tx numbytes",
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
+};
+
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_tx_bytes",
+ "ctx_tx_pkts",
+ "ctx_tx_errors",
+ "ctx_tx_dropped_pkts",
+ "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
+ "mac_tx_frames",
+ "mac_tx_bytes",
+ "mac_tx_mcast_pkts",
+ "mac_tx_bcast_pkts",
+ "mac_tx_pause_cnt",
+ "mac_tx_ctrl_pkt",
+ "mac_tx_lt_64b_pkts",
+ "mac_tx_lt_127b_pkts",
+ "mac_tx_lt_255b_pkts",
+ "mac_tx_lt_511b_pkts",
+ "mac_tx_lt_1023b_pkts",
+ "mac_tx_lt_1518b_pkts",
+ "mac_tx_gt_1518b_pkts",
+ "mac_rx_frames",
+ "mac_rx_bytes",
+ "mac_rx_mcast_pkts",
+ "mac_rx_bcast_pkts",
+ "mac_rx_pause_cnt",
+ "mac_rx_ctrl_pkt",
+ "mac_rx_lt_64b_pkts",
+ "mac_rx_lt_127b_pkts",
+ "mac_rx_lt_255b_pkts",
+ "mac_rx_lt_511b_pkts",
+ "mac_rx_lt_1023b_pkts",
+ "mac_rx_lt_1518b_pkts",
+ "mac_rx_gt_1518b_pkts",
+ "mac_rx_length_error",
+ "mac_rx_length_small",
+ "mac_rx_length_large",
+ "mac_rx_jabber",
+ "mac_rx_dropped",
+ "mac_crc_error",
+ "mac_align_error",
+ "eswitch_frames",
+ "eswitch_bytes",
+ "eswitch_multicast_frames",
+ "eswitch_broadcast_frames",
+ "eswitch_unicast_frames",
+ "eswitch_error_free_frames",
+ "eswitch_error_free_bytes",
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+ "tx_bytes",
+};
+
+#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_rx_bytes",
+ "ctx_rx_pkts",
+ "ctx_lro_pkt_cnt",
+ "ctx_ip_csum_error",
+ "ctx_rx_pkts_wo_ctx",
+ "ctx_rx_pkts_drop_wo_sds_on_card",
+ "ctx_rx_pkts_drop_wo_sds_on_host",
+ "ctx_rx_osized_pkts",
+ "ctx_rx_pkts_dropped_wo_rds",
+ "ctx_rx_unexpected_mcast_pkts",
+ "ctx_invalid_mac_address",
+ "ctx_rx_rds_ring_prim_attempted",
+ "ctx_rx_rds_ring_prim_success",
+ "ctx_num_lro_flows_added",
+ "ctx_num_lro_flows_removed",
+ "ctx_num_lro_flows_active",
+ "ctx_pkts_dropped_unknown",
+};
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Internal_Loopback_offline",
+ "External_Loopback_offline",
+ "EEPROM_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
+{
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
+{
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+ int len = -1;
+
+ if (qlcnic_82xx_check(adapter)) {
+ len = qlcnic_82xx_statistics(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ } else if (qlcnic_83xx_check(adapter)) {
+ len = qlcnic_83xx_statistics(adapter);
+ }
+
+ return len;
+}
+
+#define QLCNIC_TX_INTR_NOT_CONFIGURED	0x78563412
+
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ -1
+};
+
+static const u32 ext_diag_registers[] = {
+ CRB_XG_STATE_P3P,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
+
+#define QLCNIC_MGMT_API_VERSION 3
+#define QLCNIC_ETHTOOL_REGS_VER 4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+ (adapter->max_rds_rings * 2) +
+ (adapter->drv_sds_rings * 3) + 5;
+ return ring_regs_cnt * sizeof(u32);
+}
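+
+/*
+ * Worked example (illustrative counts, not read from hardware): with
+ * 4 Tx rings, 2 RDS rings and 4 SDS rings this reserves
+ * (4 * 5) + (2 * 2) + (4 * 3) + 5 = 41 words, i.e. 164 bytes, for the
+ * per-ring state plus the marker and ring-count words emitted by
+ * qlcnic_get_regs().
+ */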
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 len;
+
+ if (qlcnic_83xx_check(adapter))
+ len = qlcnic_83xx_get_regs_len(adapter);
+ else
+ len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+ len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+ len += qlcnic_get_ring_regs_len(adapter);
+ return len;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u32 fw_major, fw_minor, fw_build;
+
+	fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
+}
+
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 speed, reg;
+ int check_sfp_module = 0, err = 0;
+ u16 pcifn = ahw->pci_func;
+
+ /* read which mode */
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
+ ecmd->duplex = adapter->ahw->link_duplex;
+ ecmd->autoneg = adapter->ahw->link_autoneg;
+
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ u32 val = 0;
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
+
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ if (ahw->linkup) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn), &err);
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
+
+ ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+ ecmd->autoneg = ahw->link_autoneg;
+ ecmd->duplex = ahw->link_duplex;
+ goto skip;
+ }
+
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->ahw->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw->board_type) {
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = adapter->ahw->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
+ } else {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw->board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->ahw->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = PORT_OTHER;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_settings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_settings(adapter, ecmd);
+
+ return -EIO;
+}
+
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0, config = 0;
+	/* build the port configuration word */
+ if (ecmd->duplex)
+ config |= 0x1;
+
+ if (ecmd->autoneg)
+ config |= 0x2;
+
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ config |= (0 << 8);
+ break;
+ case SPEED_100:
+ config |= (1 << 8);
+ break;
+ case SPEED_1000:
+ config |= (10 << 8);
+ break;
+ default:
+ return -EIO;
+ }
+
+ ret = qlcnic_fw_cmd_set_port(adapter, config);
+
+ if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+ return ret;
+}
+
+static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (qlcnic_83xx_check(adapter))
+ ret = qlcnic_83xx_set_settings(adapter, ecmd);
+ else
+ ret = qlcnic_set_port_config(adapter, ecmd);
+
+ if (!ret)
+ return ret;
+
+ adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
+ adapter->ahw->link_duplex = ecmd->duplex;
+ adapter->ahw->link_autoneg = ecmd->autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+ u32 *regs_buff)
+{
+ int i, j = 0, err = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+ j = 0;
+ while (ext_diag_registers[j] != -1)
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+ &err);
+ return i;
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_tx_ring *tx_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+
+ regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
+ (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
+
+ regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
+ regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
+ if (qlcnic_82xx_check(adapter))
+ i = qlcnic_82xx_get_registers(adapter, regs_buff);
+ else
+ i = qlcnic_83xx_get_registers(adapter, regs_buff);
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+	/* Marker between regs and TX ring count */
+ regs_buff[i++] = 0xFFEFCDAB;
+
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+ regs_buff[i++] = tx_ring->sw_consumer;
+ regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+ regs_buff[i++] = tx_ring->producer;
+ if (tx_ring->crb_intr_mask)
+ regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+ else
+ regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+ }
+
+ regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_rings = &recv_ctx->rds_rings[ring];
+ regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+ regs_buff[i++] = rds_rings->producer;
+ }
+
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ regs_buff[i++] = sds_ring->consumer;
+ regs_buff[i++] = readl(sds_ring->crb_intr_mask);
+ }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err = 0;
+ u32 val;
+
+ if (qlcnic_83xx_check(adapter)) {
+ val = qlcnic_83xx_test_link(adapter);
+ return (val & 1) ? 0 : 1;
+ }
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+ if (err == -EIO)
+ return err;
+ val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
+ return (val == XG_LINK_UP_P3P) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret = -1;
+
+ if (qlcnic_83xx_check(adapter))
+ return 0;
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ if (qlcnic_82xx_check(adapter))
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+}
+
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
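+
+/*
+ * Worked example (values illustrative): a request for 300 descriptors
+ * against min 64 / max 1024 passes the clamp unchanged, and
+ * roundup_pow_of_two(300) yields 512, so the ring is sized to 512.
+ * Since rounding happens after the clamp, a max that is not itself a
+ * power of two could be exceeded by one rounding step.
+ */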
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+	if (rx_ring == 0 || tx_ring == 0)
+		return -EINVAL;
+
+	if (rx_ring > adapter->max_sds_rings) {
+		netdev_err(adapter->netdev,
+			   "Invalid ring count: SDS ring count %d exceeds the driver maximum of %d\n",
+			   rx_ring, adapter->max_sds_rings);
+		return -EINVAL;
+	}
+
+	if (tx_ring > adapter->max_tx_rings) {
+		netdev_err(adapter->netdev,
+			   "Invalid ring count: Tx ring count %d exceeds the driver maximum of %d\n",
+			   tx_ring, adapter->max_tx_rings);
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
+static void qlcnic_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
+}
+
+static int qlcnic_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+ return -EINVAL;
+ }
+
+ if (channel->other_count || channel->combined_count)
+ return -EINVAL;
+
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
+ if (err)
+ return err;
+
+ if (adapter->drv_sds_rings != channel->rx_count) {
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
+ return err;
+ }
+ adapter->drv_rss_rings = channel->rx_count;
+ }
+
+ if (adapter->drv_tx_rings != channel->tx_count) {
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
+ return err;
+ }
+ adapter->drv_tss_rings = channel->tx_count;
+ }
+
+ adapter->flags |= QLCNIC_TSS_RSS;
+
+ err = qlcnic_setup_rings(adapter);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
+ return err;
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->ahw->physical_port;
+ int err = 0;
+ __u32 val;
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_get_pauseparam(adapter, pause);
+ return;
+ }
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return;
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw->port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->ahw->physical_port;
+ int err = 0;
+ __u32 val;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_pauseparam(adapter, pause);
+
+ /* read mode */
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return err;
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+		QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
+ /* set autoneg */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if (!pause->rx_pause || pause->autoneg)
+ return -EOPNOTSUPP;
+
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw->port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_reg_test(adapter);
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+ if (err == -EIO)
+ return err;
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return 0;
+
+ return qlcnic_83xx_flash_test(adapter);
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+	switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return qlcnic_dev_statistics_len(adapter);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_diag_irq;
+
+ ahw->diag_cnt = 0;
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ if (ret)
+ goto free_diag_res;
+
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ goto done;
+
+ usleep_range(1000, 12000);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+
+free_diag_res:
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
+
+clear_diag_irq:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ return ret;
+}
+
+#define QLCNIC_ILB_PKT_SIZE 64
+#define QLCNIC_NUM_ILB_PKT 16
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1
+#define QLCNIC_LB_PKT_POLL_COUNT 20
+
+static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+
+ memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
+
+ memcpy(data, mac, ETH_ALEN);
+ memcpy(data + ETH_ALEN, mac, ETH_ALEN);
+
+ memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char buff[QLCNIC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff, mac);
+ return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
+}
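+
+/*
+ * Loopback frame layout, as built by qlcnic_create_loopback_buff():
+ * bytes 0-5 and 6-11 both carry the port MAC address (the frame is
+ * addressed to itself), bytes 12-15 hold the fixed marker
+ * a8 06 45 00, and the rest of the 64-byte frame keeps the 0x4e fill
+ * pattern. The check above simply rebuilds this reference frame and
+ * memcmp()s the received copy against it.
+ */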
+
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i, loop, cnt = 0;
+
+ for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+		if (!skb)
+			break;
+
+		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+ skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+ adapter->ahw->diag_cnt = 0;
+ qlcnic_xmit_frame(skb, adapter->netdev);
+ loop = 0;
+
+ do {
+ msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
+ break;
+ } while (!adapter->ahw->diag_cnt);
+
+ dev_kfree_skb_any(skb);
+
+ if (!adapter->ahw->diag_cnt)
+ dev_warn(&adapter->pdev->dev,
+ "LB Test: packet #%d was not received\n",
+ i + 1);
+ else
+ cnt++;
+ }
+ if (cnt != i) {
+ dev_err(&adapter->pdev->dev,
+ "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ if (mode != QLCNIC_ILB_MODE)
+ dev_warn(&adapter->pdev->dev,
+ "WARNING: Please check loopback cable\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int loop = 0;
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
+ if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ dev_info(&adapter->pdev->dev,
+			 "Firmware does not support loopback test\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+			 "Loopback test not supported in non-privileged mode\n");
+ return 0;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ ret = qlcnic_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_res;
+
+ ahw->diag_cnt = 0;
+ do {
+ msleep(500);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ netdev_info(netdev,
+				    "Firmware didn't send link up event for loopback request\n");
+ ret = -ETIMEDOUT;
+ goto free_res;
+ } else if (adapter->ahw->diag_cnt) {
+ ret = adapter->ahw->diag_cnt;
+ goto free_res;
+ }
+ } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_clear_lb_mode(adapter, mode);
+
+ free_res:
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
+
+ clear_it:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
+ data[5] = qlcnic_eeprom_test(dev);
+ if (data[5])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+}
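+
+/*
+ * The data[] slots filled above line up one-to-one with the
+ * qlcnic_gstrings_test[] names: data[0] register test, data[1] link
+ * test, data[2] interrupt test, data[3] internal loopback, data[4]
+ * external loopback and data[5] EEPROM/flash test. A non-zero entry
+ * marks that test as failed.
+ */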
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index, i, num_stats;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_queue_%d %s", i,
+ qlcnic_tx_queue_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_tx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_rx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ return;
+ } else {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ }
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ for (i = 0; i < num_stats; index++, i++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_device_gstrings_stats[i],
+ ETH_GSTRING_LEN);
+ }
+ }
+}
+
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
+{
+ if (type == QLCNIC_MAC_STATS) {
+ struct qlcnic_mac_statistics *mac_stats =
+ (struct qlcnic_mac_statistics *)stats;
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+ } else if (type == QLCNIC_ESW_STATS) {
+ struct __qlcnic_esw_statistics *esw_stats =
+ (struct __qlcnic_esw_statistics *)stats;
+ *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ }
+ return data;
+}
+
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_tx_queue_stats tx_stats;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ memset(&tx_stats, 0, sizeof(tx_stats));
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
+ tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
+ tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
+ tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
+ tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
+ }
+
+ adapter->stats.xmit_on = tx_stats.xmit_on;
+ adapter->stats.xmit_off = tx_stats.xmit_off;
+ adapter->stats.xmitcalled = tx_stats.xmit_called;
+ adapter->stats.xmitfinished = tx_stats.xmit_finished;
+ adapter->stats.txbytes = tx_stats.tx_bytes;
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+ return data;
+}
+
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_esw_statistics port_stats;
+ struct qlcnic_mac_statistics mac_stats;
+ int index, ret, length, size, ring;
+ char *p;
+
+ memset(data, 0, stats->n_stats * sizeof(u64));
+
+ for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+ tx_ring = &adapter->tx_ring[ring];
+ data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+ qlcnic_update_stats(adapter);
+ } else {
+ data += QLCNIC_TX_STATS_LEN;
+ }
+ }
+
+ length = QLCNIC_STATS_LEN;
+ for (index = 0; index < length; index++) {
+ p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+ size = qlcnic_gstrings_stats[index].sizeof_stat;
+ *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->linkup)
+ qlcnic_83xx_get_stats(adapter, data);
+ return;
+ } else {
+ /* Retrieve MAC statistics from firmware */
+ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+ qlcnic_get_mac_stats(adapter, &mac_stats);
+ data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+
+ memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+ QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
+ if (ret)
+ return;
+
+ data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+ QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
+ if (ret)
+ return;
+
+ qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
+}
+
+static int qlcnic_set_led(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int drv_sds_rings = adapter->drv_sds_rings;
+ int err = -EIO, active = 1;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_led(dev, state);
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		netdev_warn(dev,
+			    "LED test not supported for non-privileged function\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+ break;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) {
+ err = 0;
+ break;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+ break;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ if (adapter->nic_ops->config_led(adapter, 0, 0xf))
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+ qlcnic_diag_free_res(dev, drv_sds_rings);
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return;
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return err;
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal (non-adaptive)
+ * coalescing is supported. If rx_coalesce_usecs == 0 or
+ * rx_max_coalesced_frames == 0 then set the firmware coalescing to
+ * default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return -EINVAL;
+
+ /*
+	 * Return an error if unsupported values or
+	 * parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ err = qlcnic_config_intr_coalesce(adapter, ethcoal);
+
+ return err;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
+ ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
+ ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
+ ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
+
+ return 0;
+}
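+
+/*
+ * These two handlers back the standard ethtool coalescing interface;
+ * a usage example (interface name hypothetical):
+ *
+ *	ethtool -C eth0 rx-usecs 100 rx-frames 64
+ *	ethtool -c eth0
+ *
+ * Values above 0xffff, adaptive coalescing and the low/high packet
+ * rate variants are all rejected by qlcnic_set_intr_coalesce().
+ */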
+
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->ahw->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->ahw->msg_enable = msglvl;
+}
+
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = true;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+ return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = false;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+ return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool state;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+		state = !(val & QLC_83XX_IDC_DISABLE_FW_DUMP);
+ } else {
+ state = fw_dump->enable;
+ }
+
+ return state;
+}
+
+static int
+qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(adapter->netdev, "FW Dump not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (fw_dump->clr)
+ dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
+ else
+ dump->len = 0;
+
+ if (!qlcnic_check_fw_dump_state(adapter))
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = fw_dump->cap_mask;
+
+ dump->version = adapter->fw_version;
+ return 0;
+}
+
+static int
+qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ int i, copy_sz;
+ u32 *hdr_ptr;
+ __le32 *data;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW Dump not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (!fw_dump->clr) {
+ netdev_info(netdev, "Dump not available\n");
+ return -EINVAL;
+ }
+
+ /* Copy template header first */
+ copy_sz = fw_dump->tmpl_hdr_size;
+ hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
+ data = buffer;
+	for (i = 0; i < copy_sz / sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
+ dump->len = copy_sz + fw_dump->size;
+ dump->flag = fw_dump->cap_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(fw_dump->data);
+ fw_dump->data = NULL;
+ fw_dump->clr = 0;
+	netdev_info(netdev, "Extracted the FW dump successfully\n");
+ return 0;
+}
+
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct net_device *netdev = adapter->netdev;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+		netdev_info(netdev,
+			    "Cannot change driver mask to 0x%x. FW dump not enabled\n",
+			    mask);
+ return -EOPNOTSUPP;
+ }
+
+ fw_dump->cap_mask = mask;
+
+	/* Store new capture mask in template header as well */
+ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+ return 0;
+}
+
+static int
+qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool valid_mask = false;
+ int i, ret = 0;
+
+ switch (val->flag) {
+ case QLCNIC_FORCE_FW_DUMP_KEY:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (fw_dump->clr) {
+ netdev_info(netdev,
+ "Previous dump not cleared, not forcing dump\n");
+ break;
+ }
+
+ netdev_info(netdev, "Forcing a FW dump\n");
+ qlcnic_dev_request_reset(adapter, val->flag);
+ break;
+ case QLCNIC_DISABLE_FW_DUMP:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = qlcnic_disable_fw_dump_state(adapter);
+ break;
+
+ case QLCNIC_ENABLE_FW_DUMP:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = qlcnic_enable_fw_dump_state(adapter);
+ break;
+
+ case QLCNIC_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing a FW reset\n");
+ qlcnic_dev_request_reset(adapter, val->flag);
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ break;
+
+ case QLCNIC_SET_QUIESCENT:
+ case QLCNIC_RESET_QUIESCENT:
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ netdev_info(netdev, "Device is in non-operational state\n");
+ break;
+
+ default:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+ if (val->flag == qlcnic_fw_dump_level[i]) {
+ valid_mask = true;
+ break;
+ }
+ }
+
+ if (valid_mask) {
+ ret = qlcnic_set_dump_mask(adapter, val->flag);
+ } else {
+ netdev_info(netdev, "Invalid dump level: 0x%x\n",
+ val->flag);
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .set_settings = qlcnic_set_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .set_channels = qlcnic_set_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_phys_id = qlcnic_set_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .get_dump_flag = qlcnic_get_dump_flag,
+ .get_dump_data = qlcnic_get_dump_data,
+ .set_dump = qlcnic_set_dump,
+};
+
+const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+};
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .set_dump = qlcnic_set_dump,
+};
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000..34e467b23
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,948 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "qlcnic_hw.h"
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
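+/* Numerically identical to the generic BIT(n) macros (e.g. BIT_5 ==
+ * BIT(5) == 0x20); the local names are used by the register
+ * definitions below.
+ */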
+
+/* This field defines CRB adr [31:20] of the agents */
+
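+/*
+ * Each value packs a 12-bit (hub, agent) pair: the hub address in
+ * bits [11:7] and the agent address in bits [6:0]. For example, the
+ * MN agent on hub 0 yields (0x05 << 7) | 0x15 = 0x295.
+ */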
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
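+/*
+ * Example: QLCNIC_CRB_NIU expands to 0x06000000 + 6 * 0x00100000 =
+ * 0x06600000, since QLCNIC_HW_PX_MAP_CRB_NIU is index 6 in the PCI/X
+ * map enum above.
+ */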
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_EPG QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_EG)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_TIMER QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_TIMR)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+#define MAX_CTL_CHECK 1000
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START BIT_0
+#define TA_CTL_ENABLE BIT_1
+#define TA_CTL_WRITE BIT_2
+#define TA_CTL_BUSY BIT_3
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3P 0x01
+#define XG_LINK_DOWN_P3P 0x02
+#define XG_LINK_STATE_P3P_MASK 0xf
+#define XG_LINK_STATE_P3P(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
+
+#define P3P_LINK_SPEED_MHZ 100
+#define P3P_LINK_SPEED_MASK 0xff
+#define P3P_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3P_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_MAX_ARGS 4
+#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4)))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex,
+ * which the Phantom host reads to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, the following
+ * registers hold the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
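+/*
+ * Example: a raw temperature word of 0x00370001 decodes through the
+ * macros above as qlcnic_get_temp_val() == 0x37 (55 C) and
+ * qlcnic_get_temp_state() == QLCNIC_TEMP_NORMAL.
+ */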
+
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLCNIC_DEV_BADBAD 0xbad0bad0
+
+#define QLCNIC_DEV_NPAR_NON_OPER	0 /* NPAR non-operational */
+#define QLCNIC_DEV_NPAR_OPER		1 /* NPAR operational */
+#define QLCNIC_DEV_NPAR_OPER_TIMEO	30 /* Operational timeout */
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
+
+#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
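+/*
+ * The QLC_DEV_* helpers operate on a register holding one 4-bit
+ * driver-state field per PCI function. E.g. QLC_DEV_SET_RST_RDY(val, 2)
+ * sets bit 8 (1 << (2 * 4)), and QLC_DEV_GET_DRV(val, 2) extracts
+ * bits [11:8].
+ */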
+
+#define QLCNIC_TYPE_NIC 1
+#define QLCNIC_TYPE_FCOE 2
+#define QLCNIC_TYPE_ISCSI 3
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR BIT_31
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
+#define QLCNIC_FWERROR_FAN_FAILURE 0x16
+
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
+
+#define QLCNIC_RESET_TIMEOUT_SECS 10
+#define QLCNIC_INIT_TIMEOUT_SECS 30
+#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
+#define QLCNIC_RCVPEG_CHECK_DELAY 10
+#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
+#define QLCNIC_CMDPEG_CHECK_DELAY 500
+#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10
+
+#define QLCNIC_MAX_MC_COUNT 38
+#define QLCNIC_MAX_UC_COUNT 512
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_MSIX_BASE 0x132110
+#define QLCNIC_MAX_VLAN_FILTERS 64
+
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+#define QLCNIC_FW_DUMP_REG1 0x00130060
+#define QLCNIC_FW_DUMP_REG2 0x001e0000
+#define QLCNIC_FLASH_SEM2_LK 0x0013C010
+#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
+#define QLCNIC_FLASH_LOCK_ID 0x001B2100
+
+/* PCI function operational mode */
+enum {
+ QLCNIC_MGMT_FUNC = 0,
+ QLCNIC_PRIV_FUNC = 1,
+ QLCNIC_NON_PRIV_FUNC = 2,
+ QLCNIC_SRIOV_PF_FUNC = 3,
+ QLCNIC_SRIOV_VF_FUNC = 4,
+ QLCNIC_UNKNOWN_FUNC_MODE = 5
+};
+
+enum {
+ QLCNIC_PORT_DEFAULTS = 0,
+ QLCNIC_ADD_VLAN = 1,
+ QLCNIC_DEL_VLAN = 2
+};
+
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x) ((uint8_t)(x))
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x) ((uint16_t)((uint32_t)(x)))
+#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
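+/* Example: for x = 0x1122334455667788ULL, MSD(x) == 0x11223344,
+ * LSD(x) == 0x55667788, MSW(LSD(x)) == 0x5566 and LSB(x) == 0x88.
+ */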
+
+#define QLCNIC_MS_CTRL 0x41000090
+#define QLCNIC_MS_ADDR_LO 0x41000094
+#define QLCNIC_MS_ADDR_HI 0x41000098
+#define QLCNIC_MS_WRTDATA_LO 0x410000A0
+#define QLCNIC_MS_WRTDATA_HI 0x410000A4
+#define QLCNIC_MS_WRTDATA_ULO 0x410000B0
+#define QLCNIC_MS_WRTDATA_UHI 0x410000B4
+#define QLCNIC_MS_RDDATA_LO 0x410000A8
+#define QLCNIC_MS_RDDATA_HI 0x410000AC
+#define QLCNIC_MS_RDDATA_ULO 0x410000B8
+#define QLCNIC_MS_RDDATA_UHI 0x410000BC
+
+#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_WRITE_START (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE)
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, }, \
+}
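+/*
+ * QLCNIC_LEGACY_INTR_CONFIG is an initializer for an eight-entry
+ * array of struct qlcnic_legacy_intr_set, one per PCI function;
+ * illustrative use:
+ *
+ *	static struct qlcnic_legacy_intr_set
+ *		legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+ */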
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit)	(((var) >> (bit)) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
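+/*
+ * Example: a PHY status value of 0x6c00 decodes as link up (bit 10),
+ * resolved (bit 11) and full duplex (bit 13), with a speed field of
+ * 1, i.e. qlcnic_get_phy_speed() == 1 (100 Mb/s).
+ */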
+
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000..75ee9e4ce
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1710 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/bitops.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+struct qlcnic_ms_reg_ctrl {
+ u32 ocm_window;
+ u32 control;
+ u32 hi;
+ u32 low;
+ u32 rd[4];
+ u32 wd[4];
+ u64 off;
+};
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
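+/*
+ * Fallback readq/writeq for 32-bit platforms without native 64-bit
+ * MMIO accessors: the access is split into two 32-bit operations
+ * (low word first), so it is not atomic with respect to the device.
+ */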
+
+static struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * Top 12 bits of the CRB internal address (hub, agent).
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+ u32 dest;
+ void __iomem *val;
+
+ dest = addr & 0xFFFF0000;
+ val = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, val);
+ readl(val);
+ val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ *data = readl(val);
+}
+
+static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+ u32 dest;
+ void __iomem *val;
+
+ dest = addr & 0xFFFF0000;
+ val = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, val);
+ readl(val);
+ val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ writel(data, val);
+ readl(val);
+}
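+/*
+ * Indirect window access: the upper 16 bits of the target address
+ * select a 64 KB window through QLCNIC_FW_DUMP_REG1 (read back to
+ * flush the posted write), and the low 16 bits index into the
+ * aperture at QLCNIC_FW_DUMP_REG2.
+ */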
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int timeout = 0, err = 0, done = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+ &err);
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ if (id_reg) {
+ done = QLCRD32(adapter, id_reg, &err);
+ if (done != -1)
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock held by=%d\n",
+ sem, done);
+ else
+ dev_err(&adapter->pdev->dev,
+						"Failed to acquire sem=%d lock\n",
+ sem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+					"Failed to acquire sem=%d lock\n", sem);
+ }
+ return -EIO;
+ }
+ usleep_range(1000, 1500);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ int err = 0;
+
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
+}
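+/*
+ * Lock and unlock are always paired; an illustrative sequence for the
+ * ROM semaphore (sem 2):
+ *
+ *	if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ *		return -EIO;
+ *	...access the ROM...
+ *	qlcnic_pcie_sem_unlock(adapter, 2);
+ */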
+
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+{
+ int err = 0;
+ u32 data;
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
+ else {
+ data = QLCRD32(adapter, addr, &err);
+ if (err == -EIO)
+ return err;
+ }
+ return data;
+}
+
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+ else
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
+}
+
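+/*
+ * qlcnic_send_cmd_descs() posts control requests through the regular
+ * TX ring: it reserves nr_desc slots under the netif TX lock, copies
+ * the descriptors in at the producer index, then advances the
+ * producer so firmware consumes them like ordinary transmit
+ * descriptors.
+ */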
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return -EIO;
+
+ tx_ring = &adapter->tx_ring[0];
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ adapter->stats.xmit_off++;
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ cmd_desc, sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ u16 vlan_id, u8 op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head;
+ int err = -EINVAL;
+
+ /* Delete MAC from the existing list */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr)) {
+ err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ list_del(&cur->list);
+ kfree(cur);
+ return err;
+ }
+ }
+ return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
+ enum qlcnic_mac_type mac_type)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head;
+
+	/* Look up whether the (addr, vlan) pair already exists */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
+ return 0;
+ }
+
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
+ if (cur == NULL)
+ return -ENOMEM;
+
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+
+ if (qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
+ kfree(cur);
+ return -EIO;
+ }
+
+ cur->vlan_id = vlan;
+ cur->mac_type = mac_type;
+
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return 0;
+}
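+/*
+ * qlcnic_nic_add_mac() is idempotent for an existing (addr, vlan)
+ * pair: duplicates return 0 without re-programming the hardware.
+ * The set_multi path below relies on this when it re-adds the
+ * adapter's own and the broadcast address on every update.
+ */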
+
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head, *tmp;
+
+ list_for_each_safe(head, tmp, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (cur->mac_type != QLCNIC_MULTICAST_MAC)
+ continue;
+
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ cur->vlan_id, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
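+/*
+ * Receive-mode precedence in __qlcnic_set_multi(): IFF_PROMISC or
+ * unicast-filter overflow selects ACCEPT_ALL, IFF_ALLMULTI or
+ * multicast overflow selects ACCEPT_MULTI, otherwise miss traffic is
+ * dropped and only the programmed filters receive.
+ */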
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct netdev_hw_addr *ha;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan,
+ QLCNIC_UNICAST_MAC);
+ qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC);
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else if (!netdev_mc_empty(netdev)) {
+ qlcnic_flush_mcast_mac(adapter);
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+ QLCNIC_MULTICAST_MAC);
+ }
+
+	/* Configure unicast MAC addresses; if there is not enough space
+	 * to store all of them, enable promiscuous mode instead.
+	 */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+ QLCNIC_UNICAST_MAC);
+ }
+
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
+}
+
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *n;
+ struct hlist_head *head;
+ int i;
+ unsigned long expires;
+ u8 cmd;
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
+ head = &(adapter->fhash.fhead[i]);
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+ head = &(adapter->rx_fhash.fhead[i]);
+
+		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
+ spin_lock_bh(&adapter->rx_mac_learn_lock);
+ adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->rx_mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+}
+
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *n;
+ struct hlist_head *head;
+ int i;
+ u8 cmd;
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
+ head = &(adapter->fhash.fhead[i]);
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+ req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
+
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+		dev_err(&adapter->pdev->dev, "failed to %s loopback mode\n",
+			flag ? "set" : "reset");
+ return rv;
+}
+
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ if (qlcnic_set_fw_loopback(adapter, mode))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, 0);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ mode = VPORT_MISS_MODE_DROP;
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+ msleep(1000);
+ return 0;
+}
+
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
+ ((u64) adapter->portnum << 16));
+
+ req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
+ req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
+ ((u64) adapter->ahw->coal.rx_time_us) << 16);
+ req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
+ ((u64) adapter->ahw->coal.type) << 32 |
+ ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ int rv;
+
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+ rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+ if (rv)
+ netdev_err(adapter->netdev,
+ "Failed to set Rx coalescing parameters\n");
+
+ return rv;
+}
+
+#define QLCNIC_ENABLE_IPV4_LRO BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ word = 0;
+ if (enable) {
+ word = QLCNIC_ENABLE_IPV4_LRO;
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAP2_HW_LRO_IPV6)
+ word |= QLCNIC_ENABLE_IPV6_LRO;
+ }
+
+ req.words[0] = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL
+
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 10: type-c rss
+ * 11: udp rss
+ * 47-12: reserved
+ * 62-48: indirection table mask
+ * 63: feature flag
+ */
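+	/*
+	 * Worked example (follows directly from the layout above): with
+	 * enable = 1 the word assembled below evaluates to
+	 * 0x80070000000005f0: hash type 0x3 in bits 5-4 and 7-6 (0xf0),
+	 * enable in bit 8 (0x100), type-c rss in bit 10 (0x400), the 0x7
+	 * indirection table mask in bits 50-48 and the feature flag in
+	 * bit 63.
+	 */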
+ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+ (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+ (u64)QLCNIC_RSS_FEATURE_FLAG;
+
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
+
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_ipaddr *ipa;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ ipa = (struct qlcnic_ipaddr *)&req.words[1];
+ ipa->ipv4 = ip;
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+			"failed to %s IP address 0x%x\n",
+			(cmd == QLCNIC_IP_UP) ? "add" : "remove", ip);
+}
+
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit (MTU)
+ * @returns 0 on success, negative on failure
+ */
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
+		dev_err(&adapter->netdev->dev,
+			"supported MTU range is %d to %d bytes\n",
+			P3P_MIN_MTU, P3P_MAX_MTU);
+ return -EINVAL;
+ }
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+ netdev_features_t features)
+{
+ u32 offload_flags = adapter->offload_flags;
+
+ if (offload_flags & BIT_0) {
+ features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+ adapter->rx_csum = 1;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ if (!(offload_flags & BIT_1))
+ features &= ~NETIF_F_TSO;
+ else
+ features |= NETIF_F_TSO;
+
+ if (!(offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO6;
+ else
+ features |= NETIF_F_TSO6;
+ }
+ } else {
+ features &= ~(NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ adapter->rx_csum = 0;
+ }
+
+ return features;
+}
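+
+/*
+ * Decoding sketch for the flags consumed above: offload_flags BIT_0
+ * gates RX/TX checksum offload, BIT_1 gates TSO and BIT_2 gates TSO6.
+ * For example, offload_flags = 0x3 keeps checksumming and NETIF_F_TSO
+ * set while masking NETIF_F_TSO6 off.
+ */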
+
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed;
+
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+ features = qlcnic_process_flags(adapter, features);
+ } else {
+ changed = features ^ netdev->features;
+ features ^= changed & (NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+ }
+ }
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (!(changed & NETIF_F_LRO))
+ return 0;
+
+ netdev->features ^= NETIF_F_LRO;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if (!hw_lro && qlcnic_82xx_check(adapter)) {
+ if (qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * qlcnic_pci_get_crb_addr_2M - translate a CRB offset into the 2M map
+ * In: 'off' is an offset from the CRB base in the 128M pci map.
+ * Returns:
+ *	< 0 if 'off' is not a valid CRB offset;
+ *	  1 if CRB window access is needed; *addr points into the
+ *	    indirect window and the caller must program the window first;
+ *	  0 if no window access is needed; *addr is the direct 2M mapping.
+ */
+static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = ahw->pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static int
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+ if (window == 0) {
+ dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+ return -EIO;
+ }
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ return -EIO;
+ }
+ return 0;
+}
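+
+/*
+ * Window math sketch: CRB_HI() (defined elsewhere in this driver)
+ * extracts the window-select bits of the 128M-space offset.  The value
+ * is written to CRB_WINDOW_2M and read back to confirm the remap before
+ * the (off & MASK(16)) indirect access in the callers goes out.
+ */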
+
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+ crb_win_lock(adapter);
+ rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+ if (!rv)
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+ return rv;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ int *err)
+{
+ unsigned long flags;
+ int rv;
+ u32 data = -1;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+ crb_win_lock(adapter);
+ if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
+ u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr));
+
+ return addr;
+}
+
+static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
+ u32 window, u64 off, u64 *data, int op)
+{
+ void __iomem *addr;
+ u32 start;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ writel(window, adapter->ahw->ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw->ocm_win_crb);
+ start = QLCNIC_PCI_OCM0_2M + off;
+
+ addr = adapter->ahw->pci_base0 + start;
+
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+ /* Set window to 0 */
+ writel(0, adapter->ahw->ocm_win_crb);
+ readl(adapter->ahw->ocm_win_crb);
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return 0;
+}
+
+static void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw->pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+static void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw->pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+/* Set MS memory control data for different adapters */
+static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off,
+ struct qlcnic_ms_reg_ctrl *ms)
+{
+ ms->control = QLCNIC_MS_CTRL;
+ ms->low = QLCNIC_MS_ADDR_LO;
+ ms->hi = QLCNIC_MS_ADDR_HI;
+ if (off & 0xf) {
+ ms->wd[0] = QLCNIC_MS_WRTDATA_LO;
+ ms->rd[0] = QLCNIC_MS_RDDATA_LO;
+ ms->wd[1] = QLCNIC_MS_WRTDATA_HI;
+ ms->rd[1] = QLCNIC_MS_RDDATA_HI;
+ ms->wd[2] = QLCNIC_MS_WRTDATA_ULO;
+ ms->wd[3] = QLCNIC_MS_WRTDATA_UHI;
+ ms->rd[2] = QLCNIC_MS_RDDATA_ULO;
+ ms->rd[3] = QLCNIC_MS_RDDATA_UHI;
+ } else {
+ ms->wd[0] = QLCNIC_MS_WRTDATA_ULO;
+ ms->rd[0] = QLCNIC_MS_RDDATA_ULO;
+ ms->wd[1] = QLCNIC_MS_WRTDATA_UHI;
+ ms->rd[1] = QLCNIC_MS_RDDATA_UHI;
+ ms->wd[2] = QLCNIC_MS_WRTDATA_LO;
+ ms->wd[3] = QLCNIC_MS_WRTDATA_HI;
+ ms->rd[2] = QLCNIC_MS_RDDATA_LO;
+ ms->rd[3] = QLCNIC_MS_RDDATA_HI;
+ }
+
+ ms->ocm_window = OCM_WIN_P3P(off);
+ ms->off = GET_MEM_OFFS_2M(off);
+}
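+
+/*
+ * Selection sketch: the read/write paths below always operate on the
+ * 16-byte line at off & ~0xf.  The (off & 0xf) test above swaps the
+ * LO/HI and ULO/UHI register pairs so that ms->wd[2]/ms->wd[3] always
+ * carry the 8 bytes actually being written, while ms->wd[0]/ms->wd[1]
+ * are refilled from ms->rd[0]/ms->rd[1] to leave the other half of the
+ * line unchanged.
+ */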
+
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ int j, ret = 0;
+ u32 temp, off8;
+ struct qlcnic_ms_reg_ctrl ms;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+ if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX) ||
+ ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))
+ return -EIO;
+
+ qlcnic_set_ms_controls(adapter, off, &ms);
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+ ms.off, &data, 1);
+
+ off8 = off & ~0xf;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ qlcnic_ind_wr(adapter, ms.low, off8);
+ qlcnic_ind_wr(adapter, ms.hi, 0);
+
+ qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, ms.control);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
+ /* This is the modify part of read-modify-write */
+ qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0]));
+ qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1]));
+ /* This is the write part of read-modify-write */
+ qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff);
+ qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff);
+
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, ms.control);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val;
+ struct qlcnic_ms_reg_ctrl ms;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+ if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX) ||
+ ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))
+ return -EIO;
+
+ memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+ qlcnic_set_ms_controls(adapter, off, &ms);
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+ ms.off, data, 0);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ off8 = off & ~0xf;
+
+ qlcnic_ind_wr(adapter, ms.low, off8);
+ qlcnic_ind_wr(adapter, ms.hi, 0);
+
+ qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, ms.control);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+
+ temp = qlcnic_ind_rd(adapter, ms.rd[3]);
+ val = (u64)temp << 32;
+ val |= qlcnic_ind_rd(adapter, ms.rd[2]);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic, err = 0;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw->board_type = board_type;
+
+ if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+ if (err == -EIO)
+ return err;
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+ adapter->ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
+ adapter->ahw->port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+ int err = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_STATUS);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state, err=%d\n",
+ err);
+ } else {
+ beacon_state = cmd.rsp.arg[1];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLCNIC_BEACON_OFF;
+ else if (beacon_state == QLCNIC_BEACON_EANBLE)
+ ahw->beacon_state = QLCNIC_BEACON_ON;
+ }
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ u32 func;
+ u32 msix_base;
+
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+}
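+
+/*
+ * Hypothetical example (register values for illustration only): with
+ * msix_base = 0x44000 and QLCNIC_MSIX_TBL_PGSIZE = 0x1000, a config
+ * space read of 0x46000 yields pci_func = (0x46000 - 0x44000) / 0x1000
+ * = 2.
+ */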
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int err = 0;
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset, &err);
+ memcpy(buf, &data, size);
+ }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+ return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_pcie_sem_unlock(adapter, 5);
+}
+
+int qlcnic_82xx_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_resume(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (!err)
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return err;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644
index 000000000..cbe2399c3
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -0,0 +1,225 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+ QLCNIC_PEG_HALT_STATUS1 = 0,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_FLASH_LOCK_OWNER,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_FW_VERSION_MAJOR,
+ QLCNIC_FW_VERSION_MINOR,
+ QLCNIC_FW_VERSION_SUB,
+ QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_FW_IMG_VALID,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_ASIC_TEMP,
+ QLCNIC_FW_API,
+ QLCNIC_DRV_OP_MODE,
+ QLCNIC_FLASH_LOCK,
+ QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr) \
+ readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value) \
+ writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr) \
+ readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value) \
+ writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
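+
+/*
+ * Usage sketch (mirrors the callers in this driver):
+ *	u32 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
+ *	QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ *			    PHAN_INITIALIZE_ACK);
+ * Note that 'addr' is an index into reg_tbl[]/ext_reg_tbl[], not a raw
+ * BAR0 offset.
+ */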
+
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1
+#define QLCNIC_CMD_CONFIG_INTRPT 0x2
+#define QLCNIC_CMD_CREATE_RX_CTX 0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX 0x8
+#define QLCNIC_CMD_CREATE_TX_CTX 0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX 0xa
+#define QLCNIC_CMD_CONFIGURE_LRO 0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD
+#define QLCNIC_CMD_GET_STATISTICS 0xF
+#define QLCNIC_CMD_INTRPT_TEST 0x11
+#define QLCNIC_CMD_SET_MTU 0x12
+#define QLCNIC_CMD_READ_PHY 0x13
+#define QLCNIC_CMD_WRITE_PHY 0x14
+#define QLCNIC_CMD_READ_HW_REG 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL 0x16
+#define QLCNIC_CMD_SET_FLOW_CTL 0x17
+#define QLCNIC_CMD_READ_MAX_MTU 0x18
+#define QLCNIC_CMD_READ_MAX_LRO 0x19
+#define QLCNIC_CMD_MAC_ADDRESS 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO 0x20
+#define QLCNIC_CMD_GET_NIC_INFO 0x21
+#define QLCNIC_CMD_SET_NIC_INFO 0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING 0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a
+#define QLCNIC_CMD_CONFIG_PORT 0x2e
+#define QLCNIC_CMD_TEMP_SIZE 0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
+#define QLCNIC_CMD_CONFIG_VPORT 0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
+#define QLCNIC_CMD_GET_MAC_STATS 0x37
+#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
+#define QLCNIC_CMD_GET_LED_STATUS 0x3C
+#define QLCNIC_CMD_CONFIGURE_RSS 0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
+#define QLCNIC_CMD_CONFIGURE_LED 0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45
+#define QLCNIC_CMD_GET_LINK_EVENT 0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E
+#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
+#define QLCNIC_CMD_IDC_ACK 0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG 0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG 0x67
+#define QLCNIC_CMD_GET_LINK_STATUS 0x68
+#define QLCNIC_CMD_SET_LED_CONFIG 0x69
+#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
+#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
+#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+
+#define QLCNIC_INTRPT_INTX 1
+#define QLCNIC_INTRPT_MSIX 3
+#define QLCNIC_INTRPT_ADD 1
+#define QLCNIC_INTRPT_DEL 2
+
+#define QLCNIC_GET_CURRENT_MAC 1
+#define QLCNIC_SET_STATION_MAC 2
+#define QLCNIC_GET_DEFAULT_MAC 3
+#define QLCNIC_GET_FAC_DEF_MAC 4
+#define QLCNIC_SET_FAC_DEF_MAC 5
+
+#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_BC_EVENT 0x8002
+#define QLCNIC_MBX_COMP_EVENT 0x8100
+#define QLCNIC_MBX_REQUEST_EVENT 0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
+#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
+
+struct qlcnic_mailbox_metadata {
+ u32 cmd;
+ u32 in_args;
+ u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER 1
+#define QLCNIC_CLR_OWNER 0
+#define QLCNIC_MBX_TIMEOUT 5000
+
+#define QLCNIC_MBX_RSP_OK 1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+ u64 *uaddr, u16 vlan_id);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *tx_ring, int);
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+int qlcnic_82xx_shutdown(struct pci_dev *);
+int qlcnic_82xx_resume(struct qlcnic_adapter *);
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
+void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
+#endif /* __QLCNIC_HW_H */
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000..be41e4c77
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -0,0 +1,1310 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
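+
+/*
+ * Example expansion: crb_addr_transform(SRE) becomes
+ *	crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_SRE] =
+ *		QLCNIC_HW_CRB_HUB_AGT_ADR_SRE << 20;
+ * i.e. each table slot holds a hub agent address shifted into the top
+ * 12 bits, the form qlcnic_decode_crb_addr() later matches against.
+ */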
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static int
+qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->skb == NULL)
+ continue;
+
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf++;
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+
+ spin_lock(&tx_ring->tx_clean_lock);
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+
+ spin_unlock(&tx_ring->tx_clean_lock);
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int ring;
+
+ recv_ctx = adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ return;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i;
+
+ recv_ctx = adapter->recv_ctx;
+
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
+ goto err_out;
+
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->ahw->capabilities &
+ QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+ }
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL)
+ goto err_out;
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
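+/*
+ * Hypothetical example: if crb_addr_xform[5] equals
+ * (0x12301234 & 0xfff00000), then qlcnic_decode_crb_addr(0x12301234)
+ * returns (5 << 20) + 0x01234 = 0x00501234; the 20-bit offset is kept
+ * and only the window base is rewritten.
+ */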
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+ int err = 0;
+
+ cond_resched();
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+				"Timeout reached waiting for rom done\n");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ u32 addr, u32 *valp)
+{
+ int err = 0;
+
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+ if (err == -EIO)
+ return err;
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+		u32 v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, err = 0;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off, val;
+ struct pci_dev *pdev = adapter->pdev;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
+
+	/* Halt all the individual PEGs and other blocks */
+ /* disable all I2Q */
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff);
+ /* disable xge rx/tx */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00);
+ /* disable xg1 rx/tx */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
+
+ /* halt sre */
+ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+ if (err == -EIO)
+ return err;
+ QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
+
+ /* halt epg */
+ QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1);
+
+ /* halt timers */
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0);
+ /* halt pegs */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(20);
+
+ qlcnic_rom_unlock(adapter);
+	/* big hammer: don't reset the CAM block on reset */
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+
+ /* Init HW CRB block */
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+		/*
+		 * After writing this register, HW needs time for CRB
+		 * to quiet down (else crb_window returns 0xffffffff).
+		 */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* Initialize protocol process engine */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
+ usleep_range(1000, 1500);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+ return 0;
+}
+
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+
+out_err:
+	dev_err(&adapter->pdev->dev,
+		"Command Peg initialization not complete, state: 0x%x.\n", val);
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(QLCNIC_RCVPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ if (!retries) {
+		dev_err(&adapter->pdev->dev,
+			"Receive Peg initialization not complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+ int timeo;
+ u32 val;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_DEV_GET_DRV(val, adapter->portnum);
+ if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+ dev_err(&adapter->pdev->dev,
+ "Not an Ethernet NIC func=%u\n", val);
+ return -EIO;
+ }
+ adapter->ahw->physical_port = (val >> 2);
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = QLCNIC_INIT_TIMEOUT_SECS;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = QLCNIC_RESET_TIMEOUT_SECS;
+
+ adapter->reset_ack_timeo = timeo;
+
+ return 0;
+}
+
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+ struct qlcnic_flt_entry *region_entry)
+{
+ struct qlcnic_flt_header flt_hdr;
+ struct qlcnic_flt_entry *flt_entry;
+ int i = 0, ret;
+ u32 entry_size;
+
+ memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+ (u8 *)&flt_hdr,
+ sizeof(struct qlcnic_flt_header));
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout header\n");
+ return -EIO;
+ }
+
+ entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL)
+		return -ENOMEM;
+
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+ sizeof(struct qlcnic_flt_header),
+ (u8 *)flt_entry, entry_size);
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout entries\n");
+ goto err_out;
+ }
+
+ while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ if (flt_entry[i].region == region)
+ break;
+ i++;
+ }
+ if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ dev_warn(&adapter->pdev->dev,
+ "region=%x not found in %d regions\n", region, i);
+ ret = -EIO;
+ goto err_out;
+ }
+ memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+ vfree(flt_entry);
+ return ret;
+}
+
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_flt_entry fw_entry;
+ u32 ver = -1, min_ver;
+ int ret;
+
+ if (adapter->ahw->revision_id == QLCNIC_P3P_C0)
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION,
+ &fw_entry);
+ else
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION,
+ &fw_entry);
+
+ if (!ret)
+		/* bytes 0-3: signature, bytes 4-7: fw version */
+ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+ (int *)&ver);
+ else
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+ (int *)&ver);
+
+ ver = QLCNIC_DECODE_VERSION(ver);
+ min_ver = QLCNIC_MIN_FW_VERSION;
+
+ if (ver < min_ver) {
+ dev_err(&adapter->pdev->dev,
+ "firmware version %d.%d.%d unsupported."
+ "Min supported version %d.%d.%d\n",
+ _major(ver), _minor(ver), _build(ver),
+ _major(min_ver), _minor(min_ver), _build(min_ver));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
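+/* Returns 1 if the peg-tune capability register reports onboard memory
+ * (MN) present, 0 if not, or a negative error on a failed read.
+ */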
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability = 0;
+ int err = 0;
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+ if (err == -EIO)
+ return err;
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+
+ return 0;
+}
+
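+/* Walk the unified ROM image directory and return the table descriptor
+ * whose type matches @section, or NULL if none is found.
+ */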
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i, entries;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ entries = le32_to_cpu(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+ u32 offs = le32_to_cpu(directory->findex) +
+ i * le32_to_cpu(directory->entry_size);
+ u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define FILEHEADER_SIZE (14 * 4)
+
+static int
+qlcnic_validate_header(struct qlcnic_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 entries, entry_size, tab_size, fw_file_size;
+
+ fw_file_size = adapter->fw->size;
+
+ if (fw_file_size < FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = le32_to_cpu(directory->num_entries);
+ entry_size = le32_to_cpu(directory->entry_size);
+ tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
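+/* Bounds-check the bootloader section of a unified image: both its
+ * table entry and the data it points at must lie within the file.
+ */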
+static int
+qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ u32 offs, tab_size, data_size, idx;
+ const u8 *unirom = adapter->fw->data;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_BOOTLD_IDX_OFF);
+ idx = le32_to_cpu(temp);
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * (idx + 1);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
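+/* Same bounds check as above, applied to the firmware section */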
+static int
+qlcnic_validate_fw(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ u32 offs, tab_size, data_size, idx;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ idx = le32_to_cpu(temp);
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * (idx + 1);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
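+/* Pick the product table entry matching this chip revision and the
+ * card's memory configuration (MN); if no MN-flagged entry matches,
+ * rescan for a non-MN entry.
+ */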
+static int
+qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = qlcnic_has_mn(adapter);
+ u32 entries, entry_size, tab_size, i;
+ __le32 temp;
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (!ptab_descr)
+ return -EINVAL;
+
+ entries = le32_to_cpu(ptab_descr->num_entries);
+ entry_size = le32_to_cpu(ptab_descr->entry_size);
+ tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+nomn:
+ for (i = 0; i < entries; i++) {
+ u32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw->revision_id;
+ u32 flagbit;
+
+ offs = le32_to_cpu(ptab_descr->findex) +
+ i * le32_to_cpu(ptab_descr->entry_size);
+ temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
+ flags = le32_to_cpu(temp);
+ temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
+ file_chiprev = le32_to_cpu(temp);
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -EINVAL;
+}
+
+static int
+qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *tab_desc;
+ u32 offs, idx;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
+ idx = le32_to_cpu(temp);
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+ struct uni_data_desc *data_desc;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF);
+
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = le32_to_cpu(data_desc->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+ struct uni_data_desc *data_desc;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = le32_to_cpu(data_desc->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *data_desc;
+ const u8 *unirom = adapter->fw->data;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return le32_to_cpu(data_desc->size);
+ else
+ return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
+}
+
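+/* For a unified image, parse the "REV=x.y.z" string stored near the
+ * end of the firmware section; otherwise read the version word straight
+ * from the flash-format file.
+ */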
+static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ u32 major, minor, sub;
+ __le32 version_offset;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+ version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
+ return le32_to_cpu(version_offset);
+ }
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
+ le32_to_cpu(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
+
+static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ u32 bios_ver, prd_off = adapter->file_prd_off;
+ u8 *version_offset;
+ __le32 temp;
+
+ if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+ version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
+ return le32_to_cpu(*(__le32 *)version_offset);
+ }
+
+ temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
+ bios_ver = le32_to_cpu(temp);
+
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
+static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
+
+ qlcnic_pcie_sem_unlock(adapter, 2);
+}
+
+static int
+qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
+{
+ u32 heartbeat, ret = -EIO;
+ int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+
+ adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ return ret;
+}
+
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ if ((adapter->flags & QLCNIC_FW_HANG) ||
+ qlcnic_check_fw_heartbeat(adapter)) {
+ qlcnic_rom_lock_recovery(adapter);
+ return 1;
+ }
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ if (adapter->fw)
+ return 1;
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
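+/* Copy the bootloader and firmware images into card memory 64 bits at
+ * a time, either from the requested firmware file or, when booting from
+ * flash, via ROM reads; then release the pegs from reset.
+ */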
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ __le64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->ahw->fw_type]);
+
+ if (fw) {
+ u64 data;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = le64_to_cpu(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = le64_to_cpu(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = le64_to_cpu(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
+ } else {
+ u64 data;
+ u32 hi, lo;
+ int ret;
+ struct qlcnic_flt_entry bootld_entry;
+
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
+ &bootld_entry);
+ if (!ret) {
+ size = bootld_entry.size / 8;
+ flashaddr = bootld_entry.start_addr;
+ } else {
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+ dev_info(&pdev->dev,
+ "using legacy method to get flash fw region\n");
+ }
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ usleep_range(1000, 1500);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u32 ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->ahw->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_validate_unified_romimage(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if (val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if (ver < QLCNIC_MIN_FW_VERSION) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if (val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->ahw->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->ahw->fw_type = fw_type;
+}
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->ahw->fw_type],
+ &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ usleep_range(1000, 1500);
+ goto next;
+ }
+ }
+}
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 000000000..d4b5085a2
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,2230 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <linux/printk.h>
+
+#include "qlcnic.h"
+
+#define QLCNIC_TX_ETHER_PKT 0x01
+#define QLCNIC_TX_TCP_PKT 0x02
+#define QLCNIC_TX_UDP_PKT 0x03
+#define QLCNIC_TX_IP_PKT 0x04
+#define QLCNIC_TX_TCP_LSO 0x05
+#define QLCNIC_TX_TCP_LSO6 0x06
+#define QLCNIC_TX_ENCAP_PKT 0x07
+#define QLCNIC_TX_ENCAP_LSO 0x08
+#define QLCNIC_TX_TCPV6_PKT 0x0b
+#define QLCNIC_TX_UDPV6_PKT 0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED 0x10
+#define QLCNIC_FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ ((cmd_desc)->vlan_TCI = cpu_to_le16(v))
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode |= \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x07FFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ (((sts_data1) >> 32) & 0x0FFFF)
+
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+#define QLCNIC_TX_POLL_BUDGET 128
+#define QLCNIC_TCP_HDR_SIZE 20
+#define QLCNIC_TCP_TS_OPTION_SIZE 12
+#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
+
+#define qlcnic_83xx_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts) (((sts) >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts) (((sts) >> 39) & 7)
+#define qlcnic_83xx_opcode(sts) (((sts) >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
+
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max);
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *,
+ u16, u16);
+
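+/* Fold a MAC address and VLAN id into an 8-bit hash used to select a
+ * filter-table bucket.
+ */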
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
+{
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
+}
+
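+/* On 83xx the 15-bit buffer handle is tagged with the rds ring id in
+ * bit 15 so the completion path can recover the ring; e.g. handle
+ * 0x12 on ring 1 becomes 0x8012. On 82xx the handle is returned as is.
+ */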
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+ u16 handle, u8 ring_id)
+{
+ if (qlcnic_83xx_check(adapter))
+ return handle | (ring_id << 15);
+ else
+ return handle;
+}
+
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+ return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_filter *fil,
+ void *addr, u16 vlan_id)
+{
+ int ret;
+ u8 op;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (ret)
+ return;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (!ret) {
+ hlist_del(&fil->fnode);
+ adapter->rx_fhash.fnum--;
+ }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+ void *addr, u16 vlan_id)
+{
+ struct qlcnic_filter *tmp_fil = NULL;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
+ tmp_fil->vlan_id == vlan_id)
+ return tmp_fil;
+ }
+
+ return NULL;
+}
+
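+/* Maintain the loopback filter hash: for a loopback packet, record or
+ * refresh the source MAC in the rx filter hash; for a regular packet,
+ * drop any matching stale entry from the driver and firmware tables.
+ */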
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_head *head;
+ unsigned long time;
+ u64 src_addr = 0;
+ u8 hindex, op;
+ int ret;
+
+ if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+ vlan_id = 0;
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = qlcnic_mac_hash(src_addr, vlan_id) &
+ (adapter->fhash.fbucket_size - 1);
+
+ if (loopback_pkt) {
+ if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+ return;
+
+ head = &(adapter->rx_fhash.fhead[hindex]);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ time = tmp_fil->ftime;
+ if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ fil->ftime = jiffies;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ fil->vlan_id = vlan_id;
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->rx_fhash.fnum++;
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ } else {
+ head = &adapter->fhash.fhead[hindex];
+
+ spin_lock(&adapter->mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter,
+ (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ hlist_del(&tmp_fil->fnode);
+ adapter->fhash.fnum--;
+ }
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ return;
+ }
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ head = &adapter->rx_fhash.fhead[hindex];
+
+ spin_lock(&adapter->rx_mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil)
+ qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+ vlan_id);
+
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ }
+}
+
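+/* Post a MAC-learn request descriptor on the Tx ring so firmware adds
+ * the given address (optionally with a VLAN id) to its filter table.
+ */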
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ u16 vlan_id)
+{
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u32 producer;
+ u64 word;
+
+ producer = tx_ring->producer;
+ hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+ req = (struct qlcnic_nic_req *)hwdesc;
+ memset(req, 0, sizeof(struct qlcnic_nic_req));
+ req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+ req->req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
+
+ tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+ smp_mb();
+}
+
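+/* Learn the source MAC of a transmitted frame: refresh an existing
+ * filter entry, or program a new one into firmware and add it to the
+ * driver hash, up to the configured filter limit.
+ */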
+static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ u16 protocol = ntohs(skb->protocol);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_head *head;
+ struct hlist_node *n;
+ u64 src_addr = 0;
+ u16 vlan_id = 0;
+ u8 hindex, hval;
+
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ vlan_id = ntohs(vh->h_vlan_TCI);
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_id = skb_vlan_tag_get(skb);
+ }
+ }
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hval = qlcnic_mac_hash(src_addr, vlan_id);
+ hindex = hval & (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
+ tmp_fil->vlan_id == vlan_id) {
+ if (time_after(jiffies, QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, &src_addr,
+ vlan_id);
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ spin_lock(&adapter->mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->fhash.fnum++;
+ spin_unlock(&adapter->mac_learn_lock);
+}
+
+#define QLCNIC_ENCAP_VXLAN_PKT BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5
+
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+ int copied, copy_len, descr_size;
+ u32 producer = tx_ring->producer;
+ struct cmd_desc_type0 *hwdesc;
+ u16 flags = 0, encap_descr = 0;
+
+ opcode = QLCNIC_TX_ETHER_PKT;
+ encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+ if (skb_is_gso(skb)) {
+ inner_hdr_len = skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) -
+ skb_inner_mac_header(skb);
+
+ /* VXLAN header size = 8 */
+ outer_hdr_len = skb_transport_offset(skb) + 8 +
+ sizeof(struct udphdr);
+ first_desc->outer_hdr_length = outer_hdr_len;
+ total_hdr_len = inner_hdr_len + outer_hdr_len;
+ encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+ QLCNIC_ENCAP_DO_L4_CSUM;
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = inner_hdr_len;
+
+ /* Copy inner and outer headers in Tx descriptor(s)
+ * If total_hdr_len > cmd_desc_type0, use multiple
+ * descriptors
+ */
+ copied = 0;
+ descr_size = (int)sizeof(struct cmd_desc_type0);
+ while (copied < total_hdr_len) {
+ copy_len = min(descr_size, (total_hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc,
+ copy_len);
+ copied += copy_len;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+
+ /* Make sure updated tx_ring->producer is visible
+ * for qlcnic_tx_avail()
+ */
+ smp_mb();
+ adapter->stats.encap_lso_frames++;
+
+ opcode = QLCNIC_TX_ENCAP_LSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ } else {
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ }
+
+ adapter->stats.encap_tx_csummed++;
+ opcode = QLCNIC_TX_ENCAP_PKT;
+ }
+
+ /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+ if (ip_hdr(skb)->version == 6)
+ encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+ /* outer IP header size in 32-bit words */
+ encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+ /* outer IP header offset */
+ encap_descr |= skb_network_offset(skb) << 10;
+ first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+ first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+ skb->data;
+ first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
+static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 l4proto, opcode = 0, hdr_len = 0;
+ u16 flags = 0, vlan_tci = 0;
+ int copied, offset, copy_len, size;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+ u16 protocol = ntohs(skb->protocol);
+ u32 producer = tx_ring->producer;
+
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ flags = QLCNIC_FLAGS_VLAN_TAGGED;
+ vlan_tci = ntohs(vh->h_vlan_TCI);
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ } else if (skb_vlan_tag_present(skb)) {
+ flags = QLCNIC_FLAGS_VLAN_OOB;
+ vlan_tci = skb_vlan_tag_get(skb);
+ }
+ if (unlikely(adapter->tx_pvid)) {
+ if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = QLCNIC_FLAGS_VLAN_OOB;
+ vlan_tci = adapter->tx_pvid;
+ }
+set_flags:
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (*(skb->data) & BIT_0) {
+ flags |= BIT_0;
+ memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+ }
+ opcode = QLCNIC_TX_ETHER_PKT;
+ if (skb_is_gso(skb)) {
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = hdr_len;
+ opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+ QLCNIC_TX_TCP_LSO;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring */
+ copied = 0;
+ offset = 2;
+
+ if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+ first_desc->hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+
+ /* Only in case of TSO on vlan device */
+ flags |= QLCNIC_FLAGS_VLAN_TAGGED;
+
+ /* Create a TSO vlan header template for firmware */
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vlan_tci);
+
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16,
+ copy_len - 16);
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+ size = (int)sizeof(struct cmd_desc_type0) - offset;
+ copy_len = min(size, (hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc +
+ offset, copy_len);
+ copied += copy_len;
+ offset = 0;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ smp_mb();
+ adapter->stats.lso_frames++;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (protocol == ETH_P_IP) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = QLCNIC_TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = QLCNIC_TX_UDP_PKT;
+ } else if (protocol == ETH_P_IPV6) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = QLCNIC_TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = QLCNIC_TX_UDPV6_PKT;
+ }
+ }
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
+static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = skb_frag_size(frag);
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < nr_frags; i++) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ pbuf->skb = NULL;
+}
+
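+/* Reset the control words of a command descriptor before it is reused */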
+static inline void qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+ desc[7] = 0ULL;
+}
+
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ struct ethhdr *phdr;
+ int i, k, frag_count, delta = 0;
+ u32 producer, num_txd;
+ u16 protocol;
+ bool l4_is_udp = false;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_tx_stop_all_queues(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (adapter->flags & QLCNIC_MACSPOOF) {
+ phdr = (struct ethhdr *)skb->data;
+ if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ goto drop_packet;
+ }
+
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_tx_start_queue(tx_ring->txq);
+ } else {
+ tx_ring->tx_stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pdev = adapter->pdev;
+ first_desc = &tx_ring->desc_head[producer];
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
+ goto drop_packet;
+ }
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+ smp_mb();
+
+ protocol = ntohs(skb->protocol);
+ if (protocol == ETH_P_IP)
+ l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+ else if (protocol == ETH_P_IPV6)
+ l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+ /* Check if it is a VXLAN packet */
+ if (!skb->encapsulation || !l4_is_udp ||
+ !qlcnic_encap_tx_offload(adapter)) {
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+ tx_ring)))
+ goto unwind_buff;
+ } else {
+ if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+ skb, tx_ring)))
+ goto unwind_buff;
+ }
+
+ if (adapter->drv_mac_learn)
+ qlcnic_send_filter(adapter, first_desc, skb);
+
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
+
+ qlcnic_update_cmd_producer(tx_ring);
+
+ return NETDEV_TX_OK;
+
+unwind_buff:
+ qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw->linkup && !linkup) {
+ netdev_info(netdev, "NIC Link is down\n");
+ adapter->ahw->linkup = 0;
+ netif_carrier_off(netdev);
+ } else if (!adapter->ahw->linkup && linkup) {
+ adapter->ahw->linkup = 1;
+
+ /* Do not advertise Link up to the stack if device
+ * is in loopback mode
+ */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+ netdev_info(netdev, "NIC Link is up for loopback test\n");
+ return;
+ }
+
+ netdev_info(netdev, "NIC Link is up\n");
+ netif_carrier_on(netdev);
+ }
+}
+
+static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+ if (!skb) {
+ adapter->stats.skb_alloc_failure++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+
+ return 0;
+}
+
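+/* Refill the receive ring from its free list without blocking: bail
+ * out if another context already holds the ring lock.
+ */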
+static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ u8 ring_id)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ uint32_t producer, handle;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ handle = qlcnic_get_ref_handle(adapter,
+ buffer->ref_handle, ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int budget)
+{
+ u32 sw_consumer, hw_consumer;
+ int i, done, count = 0;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+ tx_ring->tx_stats.xmit_finished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= budget)
+ break;
+ }
+
+ tx_ring->sw_consumer = sw_consumer;
+
+ if (count && netif_running(netdev)) {
+ smp_mb();
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_tx_wake_queue(tx_ring->txq);
+ tx_ring->tx_stats.xmit_on++;
+ }
+ }
+ adapter->tx_timeo_cnt = 0;
+ }
+ /*
+ * If everything is freed up to the consumer, check whether the ring is
+ * full. If it is, check whether more needs to be freed and schedule the
+ * callback again.
+ *
+ * This happens when there are two CPUs: one could be freeing the ring
+ * while the other fills it. If the ring is full when we get out of here
+ * and the card has already interrupted the host, the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition in which the host misses an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+
+ spin_unlock(&tx_ring->tx_clean_lock);
+
+ return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete, work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ tx_ring = sds_ring->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len, link_speed;
+ u8 link_status, module, duplex, autoneg, lb_status = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->ahw->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+ lb_status = (msg->body[2] >> 32) & 0x3;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev,
+ "unsupported cable: OUI 0x%x, length %d\n",
+ cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+ lb_status == QLCNIC_ELB_MODE))
+ adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->ahw->link_duplex = DUPLEX_FULL;
+ else
+ adapter->ahw->link_duplex = DUPLEX_HALF;
+
+ adapter->ahw->module_type = module;
+ adapter->ahw->link_autoneg = autoneg;
+
+ if (link_status) {
+ adapter->ahw->link_speed = link_speed;
+ } else {
+ adapter->ahw->link_speed = SPEED_UNKNOWN;
+ adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+}
+
+static void qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ struct qlcnic_adapter *adapter;
+ struct device *dev;
+ int i = 0, opcode, ret;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ adapter = sds_ring->adapter;
+ dev = &adapter->pdev->dev;
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(adapter, &msg);
+ break;
+ case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+ ret = (u32)(msg.body[1]);
+ switch (ret) {
+ case 0:
+ adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+ break;
+ case 1:
+ dev_info(dev, "loopback already in progress\n");
+ adapter->ahw->diag_cnt = -EINPROGRESS;
+ break;
+ case 2:
+ dev_info(dev, "loopback cable is not connected\n");
+ adapter->ahw->diag_cnt = -ENODEV;
+ break;
+ default:
+ dev_info(dev,
+ "loopback configure request failed, err %x\n",
+ ret);
+ adapter->ahw->diag_cnt = -EIO;
+ break;
+ }
+ break;
+ case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &ring->rx_buf_arr[index];
+ if (unlikely(buffer->skb == NULL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+ (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ buffer->skb = NULL;
+
+ return skb;
+}
+
+static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, u16 *vlan_tag)
+{
+ struct ethhdr *eth_hdr;
+
+ if (!__vlan_get_tag(skb, vlan_tag)) {
+ eth_hdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+ if (!adapter->rx_pvid)
+ return 0;
+
+ if (*vlan_tag == adapter->rx_pvid) {
+ /* Outer vlan tag. Packet should follow non-vlan path */
+ *vlan_tag = 0xffff;
+ return 0;
+ }
+ if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring, int ring,
+ u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (adapter->rx_mac_learn) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+ }
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
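+/* Reassemble a firmware-coalesced (LRO) frame: fix up the IP length
+ * fields (and IPv4 header checksum) plus the TCP sequence/PSH from the
+ * status descriptors before handing the merged skb to the stack.
+ */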
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+ u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
+ u32 seq_number;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (adapter->rx_mac_learn) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+ }
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ }
+
+ th->psh = push;
+ th->seq = htonl(seq_number);
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+
+ return buffer;
+}
+
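+/* Drain the status ring: dispatch each completion by opcode, replenish
+ * and return consumed rx buffers to their rds rings, and advance the
+ * consumer index.
+ */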
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ int opcode, desc_cnt, count = 0;
+ u64 sts_data0, sts_data1;
+ u8 ring;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
+ sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
+ sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
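+ /* fall through */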
+ default:
+ goto skip;
+ }
+ WARN_ON(desc_cnt > 1);
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ u32 producer, handle;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+ head = &rds_ring->free_list;
+
+ while (!list_empty(head)) {
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+ ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer-1) & (rds_ring->num_desc-1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+ char prefix[30];
+
+ scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+ dev_name(&adapter->pdev->dev), __func__);
+
+ print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+ skb->data, skb->len, true);
+ }
+}
+
+static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
+ u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ length = qlcnic_get_sts_totallength(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+}
+
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data0;
+ int ring, opcode, desc_cnt;
+
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ break;
+ default:
+ ring = qlcnic_get_sts_type(sts_data0);
+ qlcnic_process_rcv_diag(adapter, ring, sts_data0);
+ break;
+ }
+
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
+
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ } else {
+ if (ring == (adapter->drv_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
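+/* 83xx status-descriptor bits that flag a packet looped back by the
+ * adapter: bit 36 for regular receive descriptors, bit 46 for LRO
+ * descriptors.
+ */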
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+ if (lro_pkt)
+ return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+ else
+ return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
+
+#define QLCNIC_ENCAP_LENGTH_MASK 0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+ return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
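+/* Handle one regular 83xx receive descriptor: detach the skb, apply the
+ * hardware checksum result, extract any VLAN tag into the skb metadata,
+ * optionally feed the MAC learning filter, and hand the frame to the
+ * stack via GRO.
+ */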
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, is_lb_pkt;
+ u16 vid = 0xffff;
+ int err;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_83xx_pktln(sts_data[0]);
+ cksum = qlcnic_83xx_csum_status(sts_data[1]);
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (qlcnic_encap_length(sts_data[1]) &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb->csum_level = 1;
+ adapter->stats.encap_rx_csummed++;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
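+/* Handle one firmware-coalesced (LRO) 83xx descriptor. The firmware has
+ * already merged the TCP segments, so the driver fixes up the IP total
+ * length (or IPv6 payload length), restores the PSH bit, and, when the
+ * firmware supplies an MSS, advertises gso_size so the stack can
+ * resegment the frame.
+ */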
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index, is_lb_pkt;
+ u16 lro_length, length, data_offset, gso_size;
+ u16 vid = 0xffff;
+ int err;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+ l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+ l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+ push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (qlcnic_83xx_is_tstamp(sts_data[1]))
+ data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ }
+
+ th->psh = push;
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+ skb_shinfo(skb)->gso_size = gso_size;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+ return buffer;
+}
+
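+/* Main 83xx NAPI receive loop: drain up to @max status descriptors,
+ * queue their buffers for reuse, replenish every RDS ring, and write
+ * the new consumer index back to the hardware.
+ */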
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf = NULL;
+ u8 ring;
+ u64 sts_data[2];
+ int count = 0, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ break;
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+ switch (opcode) {
+ case QLC_83XX_REG_DESC:
+ rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+ ring, sts_data);
+ break;
+ case QLC_83XX_LRO_DESC:
+ rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+ sts_data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode: 0x%x\n", opcode);
+ goto skip;
+ }
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ desc = &sds_ring->desc_head[consumer];
+ /* Reset the descriptor */
+ desc->status_desc_data[1] = 0;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ count++;
+ }
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+ return count;
+}
+
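+/* Poll routine for SR-IOV VFs, where a single MSI-X vector services both
+ * the Tx ring and an SDS ring; the body mirrors qlcnic_83xx_poll() since
+ * a VF owns exactly one Tx ring.
+ */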
+static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+
+ budget = QLCNIC_TX_POLL_BUDGET;
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* need a repoll */
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_msix_sriov_vf_poll,
+ NAPI_POLL_WEIGHT);
+
+ } else {
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ length = qlcnic_83xx_pktln(sts_data[0]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data[2];
+ int ring, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ return;
+
+ ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000..367f3976d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -0,0 +1,4341 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#ifdef CONFIG_QLCNIC_VXLAN
+#include <net/vxlan.h>
+#endif
+
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+ "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
+
+static int qlcnic_mac_learn;
+module_param(qlcnic_mac_learn, int, 0444);
+MODULE_PARM_DESC(qlcnic_mac_learn,
+		 "MAC filter (0=learning disabled, 1=driver learning enabled, 2=FDB learning enabled)");
+
+int qlcnic_use_msi = 1;
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi, qlcnic_use_msi, int, 0444);
+
+int qlcnic_use_msi_x = 1;
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
+
+int qlcnic_auto_fw_reset = 1;
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
+
+int qlcnic_load_fw_file;
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3=POST in medium mode, 4=POST in slow mode)");
+module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
+
+static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
+static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+ return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+ else
+ return 1;
+}
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id qlcnic_pci_tbl[] = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+}
+
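+/* Per-PCI-function interrupt target status register offsets, indexed by
+ * the function number, used when running with MSI or legacy interrupts.
+ */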
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const u32 qlcnic_reg_tbl[] = {
+ 0x1B20A8, /* PEG_HALT_STAT1 */
+ 0x1B20AC, /* PEG_HALT_STAT2 */
+ 0x1B20B0, /* FW_HEARTBEAT */
+ 0x1B2100, /* LOCK ID */
+ 0x1B2128, /* FW_CAPABILITIES */
+ 0x1B2138, /* drv active */
+ 0x1B2140, /* dev state */
+ 0x1B2144, /* drv state */
+ 0x1B2148, /* drv scratch */
+ 0x1B214C, /* dev partition info */
+ 0x1B2174, /* drv idc ver */
+ 0x1B2150, /* fw version major */
+ 0x1B2154, /* fw version minor */
+ 0x1B2158, /* fw version sub */
+ 0x1B219C, /* npar state */
+ 0x1B21FC, /* FW_IMG_VALID */
+ 0x1B2250, /* CMD_PEG_STATE */
+ 0x1B233C, /* RCV_PEG_STATE */
+ 0x1B23B4, /* ASIC TEMP */
+ 0x1B216C, /* FW api */
+ 0x1B2170, /* drv op mode */
+ 0x13C010, /* flash lock */
+ 0x13C014, /* flash unlock */
+};
+
+static const struct qlcnic_board_info qlcnic_boards[] = {
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+	  "8400 Series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x243,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24a,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x246,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x252,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x26e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x260,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x266,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x269,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x271,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ 0x0, 0x0, "8300 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE8830,
+ 0x0,
+ 0x0,
+ "8830 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x203,
+	  "8200 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x207,
+	  "8200 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x0, 0x0, "cLOM8214 1/10GbE Controller" },
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
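+/* Note the boolean-style return: 0 on success, 1 if the allocation
+ * failed, which callers translate into -ENOMEM.
+ */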
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return recv_ctx->sds_rings == NULL;
+}
+
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+ recv_ctx->sds_rings = NULL;
+}
+
+int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
+
+ memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+	/* warn if the station address read from hardware is not valid */
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head;
+
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ return;
+ }
+ }
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+
+ if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
+ return -EOPNOTSUPP;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
+ return 0;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ qlcnic_delete_adapter_mac(adapter);
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EOPNOTSUPP;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
+ if (is_unicast_ether_addr(addr)) {
+ err = dev_uc_del(netdev, addr);
+ if (!err)
+ err = qlcnic_nic_del_mac(adapter, addr);
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_del(netdev, addr);
+ } else {
+ err = -EINVAL;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid, u16 flags)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
+ pr_info("%s: FDB e-switch is not enabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(addr, adapter->mac_addr))
+ return err;
+
+ if (is_unicast_ether_addr(addr)) {
+ if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count)
+ err = dev_uc_add_excl(netdev, addr);
+ else
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(netdev, addr);
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+ struct net_device *netdev,
+ struct net_device *filter_dev, int idx)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
+ idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+
+ return idx;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ if (!adapter->fw_work.work.func)
+ return;
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
+#ifdef CONFIG_QLCNIC_VXLAN
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	/* The adapter supports only one VXLAN port. Use the very first
+	 * port seen for enabling the offload.
+	 */
+ if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ return;
+
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ (ahw->vxlan_port != ntohs(port)))
+ return;
+
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+#endif
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_fix_features = qlcnic_fix_features,
+ .ndo_set_features = qlcnic_set_features,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+ .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+ .ndo_fdb_add = qlcnic_fdb_add,
+ .ndo_fdb_del = qlcnic_fdb_del,
+ .ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
+#ifdef CONFIG_QLCNIC_VXLAN
+ .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
+ .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+ .ndo_features_check = qlcnic_features_check,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+#ifdef CONFIG_QLCNIC_SRIOV
+ .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
+ .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
+ .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
+#endif
+};
+
+static const struct net_device_ops qlcnic_netdev_failed_ops = {
+ .ndo_open = qlcnic_open,
+};
+
+static struct qlcnic_nic_template qlcnic_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_82xx_config_led,
+ .start_firmware = qlcnic_82xx_start_firmware,
+ .request_reset = qlcnic_82xx_dev_request_reset,
+ .cancel_idc_work = qlcnic_82xx_cancel_idc_work,
+ .napi_add = qlcnic_82xx_napi_add,
+ .napi_del = qlcnic_82xx_napi_del,
+ .config_ipaddr = qlcnic_82xx_config_ipaddr,
+ .shutdown = qlcnic_82xx_shutdown,
+ .resume = qlcnic_82xx_resume,
+ .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
+};
+
+struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
+};
+
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+ .read_crb = qlcnic_82xx_read_crb,
+ .write_crb = qlcnic_82xx_write_crb,
+ .read_reg = qlcnic_82xx_hw_read_wx_2M,
+ .write_reg = qlcnic_82xx_hw_write_wx_2M,
+ .get_mac_address = qlcnic_82xx_get_mac_address,
+ .setup_intr = qlcnic_82xx_setup_intr,
+ .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_82xx_issue_cmd,
+ .get_func_no = qlcnic_82xx_get_func_no,
+ .api_lock = qlcnic_82xx_api_lock,
+ .api_unlock = qlcnic_82xx_api_unlock,
+ .add_sysfs = qlcnic_82xx_add_sysfs,
+ .remove_sysfs = qlcnic_82xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
+ .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
+ .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
+ .setup_link_event = qlcnic_82xx_linkevent_request,
+ .get_nic_info = qlcnic_82xx_get_nic_info,
+ .get_pci_info = qlcnic_82xx_get_pci_info,
+ .set_nic_info = qlcnic_82xx_set_nic_info,
+ .change_macvlan = qlcnic_82xx_sre_macaddr_change,
+ .napi_enable = qlcnic_82xx_napi_enable,
+ .napi_disable = qlcnic_82xx_napi_disable,
+ .config_intr_coal = qlcnic_82xx_config_intr_coalesce,
+ .config_rss = qlcnic_82xx_config_rss,
+ .config_hw_lro = qlcnic_82xx_config_hw_lro,
+ .config_loopback = qlcnic_82xx_set_lb_mode,
+ .clear_loopback = qlcnic_82xx_clear_lb_mode,
+ .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_82xx_change_filter,
+ .get_board_info = qlcnic_82xx_get_board_info,
+ .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
+ .io_error_detected = qlcnic_82xx_io_error_detected,
+ .io_slot_reset = qlcnic_82xx_io_slot_reset,
+ .io_resume = qlcnic_82xx_io_resume,
+ .get_beacon_state = qlcnic_82xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_82xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
+ .get_saved_state = qlcnic_82xx_get_saved_state,
+ .set_saved_state = qlcnic_82xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_82xx_get_cap_size,
+ .set_sys_info = qlcnic_82xx_set_sys_info,
+ .store_cap_mask = qlcnic_82xx_store_cap_mask,
+};
+
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_82xx_check(adapter) &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+	/* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+	/* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+}
+
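+/* Request exactly the MSI-X vector count implied by the user-configured
+ * TSS/RSS ring counts. If the platform cannot grant that many (-ENOSPC),
+ * fall back to the default Tx/SDS ring counts and retry once via the
+ * "restore" label.
+ */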
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int num_msix = 0, err = 0, vector;
+
+ adapter->flags &= ~QLCNIC_TSS_RSS;
+
+ if (adapter->drv_tss_rings > 0)
+ num_msix += adapter->drv_tss_rings;
+ else
+ num_msix += adapter->drv_tx_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ num_msix += adapter->drv_rss_rings;
+ else
+ num_msix += adapter->drv_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+restore:
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
+ if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+ return err;
+
+ netdev_info(adapter->netdev,
+			    "Unable to allocate %d MSI-X vectors (err=%d)\n",
+ num_msix, err);
+
+ num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+ /* Set rings to 0 so we can restore original TSS/RSS count */
+ adapter->drv_tss_rings = 0;
+ adapter->drv_rss_rings = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ netdev_info(adapter->netdev,
+ "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+ adapter->drv_tx_rings, adapter->drv_sds_rings,
+ num_msix);
+
+ goto restore;
+ } else if (err < 0) {
+ return err;
+ }
+
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+ return 0;
+}
+
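+/* Enable MSI-X with up to @num_msix vectors. On a partial grant, shrink
+ * the request to a power of two (keeping one extra vector on 83xx),
+ * redistribute the Tx/SDS rings over the new count, and retry.
+ */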
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err, vector;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+ if (adapter->ahw->msix_supported) {
+enable_msix:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+
+ if (err == num_msix) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ adapter->ahw->num_msix = num_msix;
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return 0;
+ } else if (err > 0) {
+ pci_disable_msix(pdev);
+
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ if (qlcnic_82xx_check(adapter)) {
+ num_msix = rounddown_pow_of_two(err);
+ if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+ return -ENOSPC;
+ } else {
+ num_msix = rounddown_pow_of_two(err - 1);
+ num_msix += 1;
+ if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+ return -ENOSPC;
+ }
+
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ adapter->drv_sds_rings = num_msix;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ } else {
+ /* Distribute vectors equally */
+ adapter->drv_tx_rings = num_msix / 2;
+ adapter->drv_sds_rings = adapter->drv_tx_rings;
+ }
+
+ if (num_msix) {
+ dev_info(&pdev->dev,
+ "Trying to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
+ goto enable_msix;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X vectors, err=%d\n",
+ num_msix, err);
+ return err;
+ }
+ }
+
+ return -EIO;
+}
+
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
+ else
+ num_msix += QLCNIC_SINGLE_RING;
+
+ return num_msix;
+}
+
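+/* Fall back to plain MSI when permitted, else to legacy INTx: set up the
+ * target status/mask registers the legacy ISR needs and record the IRQ
+ * in msix_entries[0] so the request_irq path stays uniform.
+ */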
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ u32 offset, mask_reg;
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ offset = msi_tgt_status[adapter->ahw->pci_func];
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
+ offset);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return err;
+ }
+
+ if (qlcnic_use_msi || qlcnic_use_msi_x)
+ return -EOPNOTSUPP;
+
+ legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
+ adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
+ offset = legacy_intrp->tgt_status_reg;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
+ mask_reg = legacy_intrp->tgt_mask_reg;
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return err;
+}
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ int num_msix, err = 0;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = adapter->ahw->num_msix;
+ } else {
+ num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
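+/* For multi-Tx capable 82xx adapters running MSI-X, build the interrupt
+ * table describing each vector and program it into the firmware.
+ */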
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl = vzalloc(ahw->num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+				"Failed to configure interrupts for %d vectors\n",
+ ahw->num_msix);
+ return err;
+ }
+
+ return 0;
+}
+
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ if (adapter->ahw->intr_tbl) {
+ vfree(adapter->ahw->intr_tbl);
+ adapter->ahw->intr_tbl = NULL;
+ }
+}
+
+static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
+{
+ if (ahw->pci_base0 != NULL)
+ iounmap(ahw->pci_base0);
+}
+
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_pci_info *pci_info;
+ int ret;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ switch (ahw->port_type) {
+ case QLCNIC_GBE:
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ break;
+ case QLCNIC_XGBE:
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
+ break;
+ }
+ return 0;
+ }
+
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ kfree(pci_info);
+ return ret;
+}
+
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+ bool ret = false;
+
+ if (qlcnic_84xx_check(adapter)) {
+ ret = true;
+ } else if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_pci_info *pci_info;
+ int i, id = 0, ret = 0, j = 0;
+ u16 act_pci_func;
+ u8 pfn;
+
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret)
+ goto err_pci_info;
+
+ act_pci_func = ahw->total_nic_func;
+
+ adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+ act_pci_func, GFP_KERNEL);
+ if (!adapter->npars) {
+ ret = -ENOMEM;
+ goto err_pci_info;
+ }
+
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+ if (!adapter->eswitch) {
+ ret = -ENOMEM;
+ goto err_npars;
+ }
+
+ for (i = 0; i < ahw->max_vnic_func; i++) {
+ pfn = pci_info[i].id;
+
+ if (pfn >= ahw->max_vnic_func) {
+ ret = QL_STATUS_INVALID_PARAM;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
+ goto err_eswitch;
+ }
+
+ if (!pci_info[i].active ||
+ (pci_info[i].type != QLCNIC_TYPE_NIC))
+ continue;
+
+ if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+ if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+ &id))
+ adapter->npars[j].eswitch_status = true;
+ else
+ continue;
+ } else {
+ adapter->npars[j].eswitch_status = true;
+ }
+
+ adapter->npars[j].pci_func = pfn;
+ adapter->npars[j].active = (u8)pci_info[i].active;
+ adapter->npars[j].type = (u8)pci_info[i].type;
+ adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
+ adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
+ memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
+ j++;
+ }
+
+ /* Update eSwitch status for adapters without per port eSwitch
+ * configuration capability
+ */
+ if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ }
+
+ kfree(pci_info);
+ return 0;
+
+err_eswitch:
+ kfree(adapter->eswitch);
+ adapter->eswitch = NULL;
+err_npars:
+ kfree(adapter->npars);
+ adapter->npars = NULL;
+err_pci_info:
+ kfree(pci_info);
+
+ return ret;
+}
+
+static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int ret;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ret = qlcnic_api_lock(adapter);
+ if (ret)
+ goto err_lock;
+
+ id = ahw->pci_func;
+ data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, id)) |
+ QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
+ qlcnic_api_unlock(adapter);
+err_lock:
+ return ret;
+}
+
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+
+ /* Determine FW API version */
+ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FW_API);
+
+ /* Find PCI function number */
+ qlcnic_get_func_no(adapter);
+
+ /* Determine function privilege level */
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+			 "HAL Version: %d, non-privileged function\n",
+ adapter->ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else
+ adapter->nic_ops = &qlcnic_ops;
+}
+
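+/* Minimum BAR0 size expected for each adapter family's register map */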
+#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
+{
+ switch (dev_id) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
+ *bar = QLCNIC_82XX_BAR0_LENGTH;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ *bar = QLCNIC_83XX_BAR0_LENGTH;
+ break;
+ default:
+ *bar = 0;
+ }
+}
+
+static int qlcnic_setup_pci_map(struct pci_dev *pdev,
+ struct qlcnic_hardware_context *ahw)
+{
+ u32 offset;
+ void __iomem *mem_ptr0 = NULL;
+ unsigned long mem_len, pci_len0 = 0, bar0_len;
+
+ /* remap phys address */
+ mem_len = pci_resource_len(pdev, 0);
+
+ qlcnic_get_bar_length(pdev->device, &bar0_len);
+ if (mem_len >= bar0_len) {
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
+
+ ahw->pci_base0 = mem_ptr0;
+ ahw->pci_len0 = pci_len0;
+ offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
+ qlcnic_get_ioaddr(ahw, offset);
+
+ return 0;
+}
+
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+ int index)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned short subsystem_vendor;
+ bool ret = true;
+
+ subsystem_vendor = pdev->subsystem_vendor;
+
+ if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ if (qlcnic_boards[index].sub_vendor == subsystem_vendor &&
+ qlcnic_boards[index].sub_device == pdev->subsystem_device)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
+static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_validate_subsystem_id(adapter, i)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
+ else
+		sprintf(name, "%pM: %s", adapter->mac_addr,
+ qlcnic_boards[i].short_name);
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+
+ prev_fw_version = adapter->fw_version;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ return;
+ }
+ if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ if (fw_dump->tmpl_hdr == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev,
+ "Supports FW dump capability\n");
+ }
+ }
+
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (adapter->ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int
+qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
+ if (err)
+ return err;
+
+ adapter->ahw->physical_port = (u8)nic_info.phys_port;
+ adapter->ahw->switch_mode = nic_info.switch_mode;
+ adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
+ adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
+ adapter->ahw->capabilities = nic_info.capabilities;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ u32 temp;
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+ if (err == -EIO)
+ return err;
+ adapter->ahw->extra_capability[0] = temp;
+ } else {
+ adapter->ahw->extra_capability[0] = 0;
+ }
+
+ adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
+ adapter->ahw->max_mtu = nic_info.max_mtu;
+
+ if (adapter->ahw->capabilities & BIT_6) {
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
+
+ return err;
+}
+
+void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ if (esw_cfg->discard_tagged)
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ else
+ adapter->flags |= QLCNIC_TAGGING_ENABLED;
+
+ if (esw_cfg->vlan_id) {
+ adapter->rx_pvid = esw_cfg->vlan_id;
+ adapter->tx_pvid = esw_cfg->vlan_id;
+ } else {
+ adapter->rx_pvid = 0;
+ adapter->tx_pvid = 0;
+ }
+}
+
+static int
+qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot add VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
+
+ set_bit(vid, adapter->vlans);
+ return 0;
+}
+
+static int
+qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot delete VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
+ clear_bit(vid, adapter->vlans);
+ return 0;
+}
+
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
+ QLCNIC_PROMISC_DISABLED);
+
+ if (esw_cfg->mac_anti_spoof)
+ adapter->flags |= QLCNIC_MACSPOOF;
+
+ if (!esw_cfg->mac_override)
+ adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
+
+ if (!esw_cfg->promisc_mode)
+ adapter->flags |= QLCNIC_PROMISC_DISABLED;
+}
+
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return 0;
+
+ esw_cfg.pci_func = adapter->ahw->pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
+ return -EIO;
+ qlcnic_set_vlan_config(adapter, &esw_cfg);
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+ qlcnic_set_netdev_features(adapter, &esw_cfg);
+
+ return 0;
+}
+
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
+
+ adapter->offload_flags = esw_cfg->offload_flags;
+ adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+ netdev_update_features(netdev);
+ adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
+}
+
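+/*
+ * Derive this function's privilege level from the DRV_OP_MODE register and
+ * perform the one-time management-function setup (PCI info, privilege
+ * levels for the other functions). Runs only until
+ * QLCNIC_ADAPTER_INITIALIZED is set.
+ */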
+static int
+qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ int err = 0;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
+ return 0;
+
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (priv_level == QLCNIC_MGMT_FUNC) {
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ err = qlcnic_init_pci_info(adapter);
+ if (err)
+ return err;
+ /* Set privilege level for other functions */
+ qlcnic_set_function_modes(adapter);
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Management function\n",
+ adapter->ahw->fw_hal_version);
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ }
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ }
+
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ return err;
+}
+
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ struct qlcnic_npar_info *npar;
+ u8 i;
+
+ if (adapter->need_fw_reset)
+ return 0;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
+ esw_cfg.pci_func = adapter->npars[i].pci_func;
+ esw_cfg.mac_override = BIT_0;
+ esw_cfg.promisc_mode = BIT_0;
+ if (qlcnic_82xx_check(adapter)) {
+ esw_cfg.offload_flags = BIT_0;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ }
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+ npar = &adapter->npars[i];
+ npar->pvid = esw_cfg.vlan_id;
+ npar->mac_override = esw_cfg.mac_override;
+ npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
+ npar->discard_tagged = esw_cfg.discard_tagged;
+ npar->promisc_mode = esw_cfg.promisc_mode;
+ npar->offload_flags = esw_cfg.offload_flags;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_info *npar, int pci_func)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
+ esw_cfg.pci_func = pci_func;
+ esw_cfg.vlan_id = npar->pvid;
+ esw_cfg.mac_override = npar->mac_override;
+ esw_cfg.discard_tagged = npar->discard_tagged;
+ esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
+ esw_cfg.offload_flags = npar->offload_flags;
+ esw_cfg.promisc_mode = npar->promisc_mode;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+{
+ int i, err;
+ struct qlcnic_npar_info *npar;
+ struct qlcnic_info nic_info;
+ u8 pci_func;
+
+ if (qlcnic_82xx_check(adapter))
+ if (!adapter->need_fw_reset)
+ return 0;
+
+ /* Set the NPAR config data after FW reset */
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ npar = &adapter->npars[i];
+ pci_func = npar->pci_func;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (err)
+ return err;
+ nic_info.min_tx_bw = npar->min_bw;
+ nic_info.max_tx_bw = npar->max_bw;
+ err = qlcnic_set_nic_info(adapter, &nic_info);
+ if (err)
+ return err;
+
+ if (npar->enable_pm) {
+ err = qlcnic_config_port_mirroring(adapter,
+ npar->dest_npar, 1,
+ pci_func);
+ if (err)
+ return err;
+ }
+ err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
+{
+ u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
+ u32 npar_state;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
+ while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
+ msleep(1000);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
+ }
+ if (!npar_opt_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for NPAR state to operational timeout\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return 0;
+
+ err = qlcnic_set_default_offload_settings(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_reset_npar_config(adapter);
+ if (err)
+ return err;
+
+ qlcnic_dev_set_npar_ready(adapter);
+
+ return err;
+}
+
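+/*
+ * 82xx firmware bring-up: decide whether this function owns the firmware
+ * load, fetch the image (file or flash), reset and load it, then wait for
+ * the firmware to report ready and set the device state accordingly.
+ */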
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
+ return err;
+ else if (!err)
+ goto check_fw_status;
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_request_firmware(adapter);
+ } else {
+ err = qlcnic_check_flash_fw_ver(adapter);
+ if (err)
+ goto err_out;
+
+ adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
+ }
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err == 0)
+ goto check_fw_status;
+
+ err = qlcnic_pinit_from_rom(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
+
+check_fw_status:
+ err = qlcnic_check_fw_status(adapter);
+ if (err)
+ goto err_out;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ qlcnic_idc_debug_info(adapter, 1);
+ err = qlcnic_check_eswitch_mode(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failed for eswitch\n");
+ goto err_out;
+ }
+ err = qlcnic_set_mgmt_operations(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_check_options(adapter);
+ adapter->need_fw_reset = 0;
+
+ qlcnic_release_firmware(adapter);
+ return 0;
+
+err_out:
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_err(&adapter->pdev->dev, "Device state set to failed\n");
+
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
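+/*
+ * Pick the interrupt handler that matches the current mode (diagnostic
+ * test, MSI-X, MSI or legacy INTx), build a descriptive name per ring and
+ * request one vector per SDS ring plus, where supported, per TX ring.
+ */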
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int err, ring, num_sds_rings;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_tmp_intr;
+ else
+ handler = qlcnic_83xx_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_intr;
+ else
+ handler = qlcnic_83xx_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ num_sds_rings = adapter->drv_sds_rings;
+ for (ring = 0; ring < num_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
+ (ring == (num_sds_rings - 1))) {
+ if (!(adapter->flags &
+ QLCNIC_MSIX_ENABLED))
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "qlcnic");
+ else
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "%s-tx-0-rx-%d",
+ netdev->name, ring);
+ } else {
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "%s-rx-%d",
+ netdev->name, ring);
+ }
+ err = request_irq(sds_ring->irq, handler, flags,
+ sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+ }
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
+ handler = qlcnic_msix_tx_intr;
+ for (ring = 0; ring < adapter->drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ snprintf(tx_ring->name, sizeof(tx_ring->name),
+ "%s-tx-%d", netdev->name, ring);
+ err = request_irq(tx_ring->irq, handler, flags,
+ tx_ring->name, tx_ring);
+ if (err)
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+ }
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
+ for (ring = 0; ring < adapter->drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->irq)
+ free_irq(tx_ring->irq, tx_ring);
+ }
+ }
+ }
+}
+
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
+{
+ u32 capab = 0;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ } else {
+ capab = adapter->ahw->capabilities;
+ if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+}
+
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ /* Initialize interrupt coalesce parameters */
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_82xx_set_rx_coalesce(adapter);
+ }
+
+ return err;
+}
+
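+/*
+ * Bring the interface up: create the firmware context, post RX buffers,
+ * program multicast/MTU/RSS/interrupt-coalescing settings, enable NAPI
+ * and start the TX queues.
+ */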
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return 0;
+
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return -EIO;
+
+ qlcnic_get_lro_mss_capability(adapter);
+
+ if (qlcnic_fw_create_ctx(adapter))
+ return -EIO;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw->linkup = 0;
+
+ if (adapter->drv_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_def_intr_coalesce(adapter);
+
+ if (netdev->features & NETIF_F_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ adapter->ahw->reset_context = 0;
+ netif_tx_start_all_queues(netdev);
+ return 0;
+}
+
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ netif_carrier_off(netdev);
+ adapter->ahw->linkup = 0;
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ if (adapter->fhash.fnum)
+ qlcnic_delete_lb_filters(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_fw_destroy_ctx(adapter);
+ adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
+
+ qlcnic_reset_rx_buffers_list(adapter);
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+}
+
+/* Used during suspend and during firmware recovery */
+
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
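+/*
+ * Allocate everything needed to run traffic (NAPI contexts, SW and HW
+ * resources, IRQs) and create the sysfs entries. Idempotent: guarded by
+ * the QLCNIC_ADAPTER_UP_MAGIC marker, which qlcnic_detach() clears.
+ */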
+int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ goto err_out_napi_del;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_hw;
+ }
+
+ qlcnic_create_sysfs_entries(adapter);
+
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (qlcnic_encap_rx_offload(adapter))
+ vxlan_get_rx_port(netdev);
+#endif
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_hw:
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+ qlcnic_napi_del(adapter);
+ return err;
+}
+
+void qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int ring;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+
+ qlcnic_detach(adapter);
+
+ adapter->ahw->diag_test = 0;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
+static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
+
+ adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
+ GFP_KERNEL);
+ if (!adapter->recv_ctx) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
+ /* clear stats */
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+err_out:
+ return err;
+}
+
+static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ kfree(adapter->recv_ctx);
+ adapter->recv_ctx = NULL;
+
+ if (fw_dump->tmpl_hdr) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ }
+
+ if (fw_dump->dma_buffer) {
+ dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+ fw_dump->dma_buffer, fw_dump->phys_addr);
+ fw_dump->dma_buffer = NULL;
+ }
+
+ kfree(adapter->ahw->reset.buff);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ qlcnic_linkevent_request(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+
+ return 0;
+}
+
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err) {
+ __qlcnic_up(adapter, netdev);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ }
+
+ netif_device_attach(netdev);
+ }
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
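+/*
+ * Split the unicast MAC filter budget across the active PCI functions;
+ * the multicast quota is fixed at QLCNIC_MAX_MC_COUNT.
+ */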
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 act_pci_fn = ahw->total_nic_func;
+ u16 count;
+
+ ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
+ if (act_pci_fn <= 2)
+ count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) /
+ act_pci_fn;
+ else
+ count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) /
+ act_pci_fn;
+ ahw->max_uc_count = count;
+}
+
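+/*
+ * Tell the stack how many TX/RX queues are actually usable; a value of 0
+ * leaves the corresponding queue count unchanged.
+ */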
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ u8 tx_queues, u8 rx_queues)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (tx_queues) {
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ tx_queues);
+ return err;
+ }
+ }
+
+ if (rx_queues) {
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev, "failed to set %d Rx queues\n",
+ rx_queues);
+ }
+
+ return err;
+}
+
+int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
+ int pci_using_dac)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->rx_csum = 1;
+ adapter->ahw->mc_enabled = 0;
+ qlcnic_set_mac_filter_count(adapter);
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (qlcnic_vlan_tx_check(adapter))
+ netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->features |= NETIF_F_LRO;
+
+ if (qlcnic_encap_tx_offload(adapter)) {
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ /* encapsulation Tx offload supported by Adapter */
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+ }
+
+ if (qlcnic_encap_rx_offload(adapter))
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
+ netdev->hw_features = netdev->features;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ return err;
+
+ qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
+{
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ *pci_using_dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ *pci_using_dac = 0;
+ else {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring) {
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ }
+ }
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, vector, index;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+
+ tx_ring = kcalloc(adapter->drv_tx_rings,
+ sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
+
+ adapter->tx_ring = tx_ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ qlcnic_free_tx_rings(adapter);
+ return -ENOMEM;
+ }
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
+ }
+
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->adapter = adapter;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ index = adapter->drv_sds_rings + ring;
+ vector = adapter->msix_entries[index].vector;
+ tx_ring->irq = vector;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_cmd = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER;
+ else if (qlcnic_83xx_check(adapter))
+ fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
+
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
+ qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
+}
+
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_api_lock(adapter);
+ qlcnic_api_unlock(adapter);
+}
+
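+/*
+ * PCI probe: map BARs, pick the 82xx/83xx/SR-IOV register set, start the
+ * firmware, size the TX/SDS rings, set up interrupts and register the
+ * netdev. On unrecoverable firmware errors a stub netdev is registered in
+ * maintenance mode so the adapter can still be recovered.
+ */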
+static int
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ struct qlcnic_hardware_context *ahw;
+ int err, pci_using_dac = -1;
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
+ if (err)
+ goto err_out_disable_pdev;
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+ pci_enable_pcie_error_reporting(pdev);
+
+ ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+ if (!ahw) {
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
+ ahw->hw_ops = &qlcnic_hw_ops;
+ ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ qlcnic_83xx_register_map(ahw);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ qlcnic_sriov_vf_register_map(ahw);
+ break;
+ default:
+ goto err_out_free_hw_res;
+ }
+
+ err = qlcnic_setup_pci_map(pdev, ahw);
+ if (err)
+ goto err_out_free_hw_res;
+
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw = ahw;
+
+ adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (adapter->qlcnic_wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ goto err_out_free_netdev;
+ }
+
+ err = qlcnic_alloc_adapter_resources(adapter);
+ if (err)
+ goto err_out_free_wq;
+
+ adapter->dev_rst_time = jiffies;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
+ if (qlcnic_mac_learn == FDB_MAC_LEARN)
+ adapter->fdb_mac_learn = true;
+ else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+ adapter->drv_mac_learn = true;
+
+ rwlock_init(&adapter->ahw->crb_lock);
+ mutex_init(&adapter->ahw->mem_lock);
+
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ qlcnic_register_dcb(adapter);
+
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ qlcnic_reset_api_lock(adapter);
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
+ "\t\tIf reboot doesn't help, try flashing the card\n");
+ goto err_out_maintenance_mode;
+ }
+
+ /* compute and set default and max tx/sds rings */
+ if (adapter->ahw->msix_supported) {
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+ }
+
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_free_hw;
+
+ adapter->flags |= QLCNIC_NEED_FLR;
+
+ } else if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_83xx_init(adapter, pci_using_dac);
+ if (err) {
+ switch (err) {
+ case -ENOTRECOVERABLE:
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+ dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
+ goto err_out_free_hw;
+ case -ENOMEM:
+ dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+ goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
+ default:
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
+ goto err_out_maintenance_mode;
+ }
+ }
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+ } else {
+ dev_err(&pdev->dev,
+ "%s: failed. Please Reboot\n", __func__);
+ err = -ENODEV;
+ goto err_out_free_hw;
+ }
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ qlcnic_read_phys_port_id(adapter);
+
+ if (adapter->portnum == 0) {
+ qlcnic_get_board_name(adapter, board_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ board_name, adapter->ahw->revision_id);
+ }
+
+ if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
+ !!qlcnic_use_msi)
+ dev_warn(&pdev->dev,
+ "Device does not support MSI interrupts\n");
+
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_dcb_enable(adapter->dcb);
+ qlcnic_dcb_get_info(adapter->dcb);
+ err = qlcnic_setup_intr(adapter);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+ }
+
+ err = qlcnic_get_act_pci_func(adapter);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ pci_set_drvdata(pdev, adapter);
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+
+ switch (adapter->ahw->port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ if (adapter->drv_mac_learn)
+ qlcnic_alloc_lb_filters_mem(adapter);
+
+ qlcnic_add_sysfs(adapter);
+ qlcnic_register_hwmon_dev(adapter);
+ return 0;
+
+err_out_disable_mbx_intr:
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ qlcnic_cancel_idc_work(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+err_out_free_hw:
+ qlcnic_free_adapter_resources(adapter);
+
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(ahw);
+
+err_out_free_hw_res:
+ kfree(ahw);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ return err;
+
+err_out_maintenance_mode:
+ set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
+ netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+ netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
+ ahw->port_type = QLCNIC_XGBE;
+
+ if (qlcnic_83xx_check(adapter))
+ adapter->tgt_status_reg = NULL;
+ else
+ ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
+ err = register_netdev(netdev);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+ qlcnic_clr_all_drv_state(adapter, 0);
+ goto err_out_free_hw;
+ }
+
+ pci_set_drvdata(pdev, adapter);
+ qlcnic_add_sysfs(adapter);
+
+ return 0;
+}
+
+static void qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+ struct qlcnic_hardware_context *ahw;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
+ ahw = adapter->ahw;
+
+ unregister_netdev(netdev);
+ qlcnic_sriov_cleanup(adapter);
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_initialize_nic(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ kfree(ahw->fw_info);
+ }
+
+ qlcnic_dcb_free(adapter->dcb);
+ qlcnic_detach(adapter);
+ kfree(adapter->npars);
+ kfree(adapter->eswitch);
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_free_lb_filters_mem(adapter);
+
+ qlcnic_teardown_intr(adapter);
+
+ qlcnic_remove_sysfs(adapter);
+
+ qlcnic_unregister_hwmon_dev(adapter);
+
+ qlcnic_cleanup_pci_map(adapter->ahw);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ if (adapter->qlcnic_wq) {
+ destroy_workqueue(adapter->qlcnic_wq);
+ adapter->qlcnic_wq = NULL;
+ }
+
+ qlcnic_free_adapter_resources(adapter);
+ kfree(ahw);
+ free_netdev(netdev);
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+
+ retval = __qlcnic_shutdown(pdev);
+ if (retval)
+ return retval;
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int qlcnic_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ return __qlcnic_resume(adapter);
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(netdev, "%s: Device is in non-operational state\n",
+ __func__);
+
+ return -EIO;
+ }
+
+ netif_carrier_off(netdev);
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ qlcnic_detach(adapter);
+
+ return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+
+ return 0;
+}
+
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
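+/*
+ * Size and allocate the TX and RX MAC-learning filter hash tables. The
+ * per-function quota is the adapter-wide filter budget divided by the
+ * number of active NIC functions.
+ */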
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ void *head;
+ int i;
+ struct net_device *netdev = adapter->netdev;
+ u32 filter_size = 0;
+ u16 act_pci_func = 0;
+
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
+ return;
+
+ act_pci_func = adapter->ahw->total_nic_func;
+ spin_lock_init(&adapter->mac_learn_lock);
+ spin_lock_init(&adapter->rx_mac_learn_lock);
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
+ filter_size = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+ } else {
+ filter_size = QLC_83XX_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+ }
+
+ head = kcalloc(adapter->fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->fhash.fmax = (filter_size / act_pci_func);
+ adapter->fhash.fhead = head;
+
+ netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+ act_pci_func, adapter->fhash.fmax);
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+ adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+ head = kcalloc(adapter->rx_fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+ adapter->rx_fhash.fhead = head;
+
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
+}
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fmax)
+ kfree(adapter->fhash.fhead);
+
+ adapter->fhash.fhead = NULL;
+ adapter->fhash.fmax = 0;
+
+ if (adapter->rx_fhash.fmax)
+ kfree(adapter->rx_fhash.fhead);
+
+ adapter->rx_fhash.fmax = 0;
+ adapter->rx_fhash.fhead = NULL;
+}
+
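+/*
+ * Sample the on-die temperature register and log transitions between the
+ * normal, warning and panic ranges. Returns 1 when the panic threshold is
+ * reached so the caller can shut the device down.
+ */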
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_state, temp_val, temp = 0;
+ int rv = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
+ if (qlcnic_82xx_check(adapter))
+ temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->ahw->temp = temp_state;
+ return rv;
+}
+
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
+{
+ int i;
+
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ pr_info("TX Desc: %d\n", i);
+ print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ &tx_ring->desc_head[i],
+ sizeof(struct cmd_desc_type0), true);
+ }
+}
+
+static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ if (!rds_ring)
+ continue;
+ netdev_info(netdev,
+ "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n",
+ ring, readl(rds_ring->crb_rcv_producer),
+ rds_ring->producer, rds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ if (!sds_ring)
+ continue;
+ netdev_info(netdev,
+ "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n",
+ ring, readl(sds_ring->crb_sts_consumer),
+ sds_ring->consumer, readl(sds_ring->crb_intr_mask),
+ sds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (!tx_ring)
+ continue;
+ netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+ ring, tx_ring->ctx_id);
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
+
+ if (tx_ring->crb_intr_mask)
+ netdev_info(netdev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+
+ netdev_info(netdev,
+ "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->producer, tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+
+ netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+ tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+ if (netif_msg_tx_err(adapter->ahw))
+ dump_tx_ring_desc(tx_ring);
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ qlcnic_dump_rings(adapter);
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS ||
+ netif_msg_tx_err(adapter->ahw)) {
+ netdev_err(netdev, "Tx timeout, reset the adapter.\n");
+ if (qlcnic_82xx_check(adapter))
+ adapter->need_fw_reset = 1;
+ else if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ } else {
+ netdev_err(netdev, "Tx timeout, reset adapter context.\n");
+ adapter->ahw->reset_context = 1;
+ }
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_update_stats(adapter);
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->ahw->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+ struct qlcnic_host_tx_ring *tx_ring = data;
+
+ napi_schedule(&tx_ring->napi);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_schedule(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ /* Only Multi-Tx queue capable devices need to
+ * schedule NAPI for TX rings
+ */
+ if ((qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_schedule(&tx_ring->napi);
+ }
+ }
+}
+#endif
+
+static void
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+ u32 val;
+
+ val = adapter->portnum & 0xf;
+ val |= encoding << 7;
+ val |= (jiffies - adapter->dev_rst_time) << 8;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+
+ if (state == QLCNIC_DEV_NEED_RESET)
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+
+ if (failed) {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_FAILED);
+ dev_info(&adapter->pdev->dev,
+ "Device state set to Failed. Please Reboot\n");
+ } else if (!(val & 0x11111111))
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_COLD);
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/* Grab api lock, before checking state */
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+ int act, state, active_mask;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+
+ if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ active_mask = (~(1 << (ahw->pci_func * 4)));
+ act = act & active_mask;
+ }
+
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+ if (val != QLCNIC_DRV_IDC_VER) {
+ dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+ " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+ }
+
+ return 0;
+}
+
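+/*
+ * IDC arbitration for the 82xx firmware load. Returns 1 when this
+ * function should load the firmware itself, 0 when another function has
+ * already brought it to READY, and -1 on failure or timeout.
+ */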
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+ u8 ret;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ }
+
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_DRV_IDC_VER);
+ qlcnic_idc_debug_info(adapter, 0);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ ret = qlcnic_check_idc_ver(adapter);
+ qlcnic_api_unlock(adapter);
+ return ret;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ dev_err(&adapter->pdev->dev, "Device in failed state.\n");
+ qlcnic_api_unlock(adapter);
+ return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ do {
+ msleep(1000);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (prev_state == QLCNIC_DEV_QUISCENT)
+ continue;
+ } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
+
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timeout\n");
+ return -1;
+ }
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ ret = qlcnic_check_idc_ver(adapter);
+ qlcnic_api_unlock(adapter);
+
+ return ret;
+}
+
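+/*
+ * Delayed work run while the device is being reset: wait for every driver
+ * instance to ack the reset, optionally capture a firmware dump, restart
+ * the firmware and reschedule the attach work. Non-privileged functions
+ * instead wait for the device to return to READY.
+ */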
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ u32 dev_state = 0xf;
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err_ret;
+
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_QUISCENT ||
+ dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ qlcnic_api_unlock(adapter);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ return;
+ }
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ qlcnic_api_unlock(adapter);
+ goto wait_npar;
+ }
+
+ if (dev_state == QLCNIC_DEV_INITIALIZING ||
+ dev_state == QLCNIC_DEV_READY) {
+ dev_info(&adapter->pdev->dev, "Detected state change from "
+ "DEV_NEED_RESET, skipping ack check\n");
+ goto skip_ack_check;
+ }
+
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
+
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ val = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter,
+ QLCNIC_CRB_DRV_STATE, val);
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ rtnl_lock();
+ if (qlcnic_check_fw_dump_state(adapter) &&
+ (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ QLCDB(adapter, DRV, "Take FW dump\n");
+ qlcnic_dump_fw(adapter);
+ adapter->flags |= QLCNIC_FW_HANG;
+ }
+ rtnl_unlock();
+
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ if (!adapter->nic_ops->start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
+ return;
+ }
+ goto err_ret;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+wait_npar:
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
+ return;
+ }
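+ /* fall through */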
+ case QLCNIC_DEV_FAILED:
+ break;
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
+ qlcnic_clr_all_drv_state(adapter, 0);
+}
+
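+/*
+ * Delayed work that takes the interface down in response to a firmware
+ * reset request, checks for fatal peg-halt/temperature conditions and, if
+ * none, acks the reset and schedules qlcnic_fwinit_work().
+ */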
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ /* Don't grab the rtnl lock while in quiescent mode */
+ if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ } else
+ qlcnic_down(adapter, netdev);
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "Detaching the device: peg halt status1=0x%x\n",
+ status);
+
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ }
+
+ goto err_ret;
+ }
+
+ if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
+ dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
+ adapter->ahw->temp);
+ goto err_ret;
+ }
+
+	/* Don't ack if this instance is the reset owner */
+ if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set driver state,"
+ "detaching the device.\n");
+ goto err_ret;
+ }
+ }
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ netif_device_attach(netdev);
+ qlcnic_clr_all_drv_state(adapter, 1);
+}
+
+/* Transition the NPAR state to NON operational */
+static void
+qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (state == QLCNIC_DEV_NPAR_NON_OPER)
+ return;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
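+/*
+ * Disable pause frames on all XG/GbE ports so a hung firmware cannot
+ * flow-control the link, then move the device to NEED_RESET, claiming
+ * reset ownership if the device was READY.
+ */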
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ u32 state, xg_val = 0, gb_val = 0;
+
+ qlcnic_xg_set_xg0_mask(xg_val);
+ qlcnic_xg_set_xg1_mask(xg_val);
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
+ qlcnic_gb_set_gb0_mask(gb_val);
+ qlcnic_gb_set_gb1_mask(gb_val);
+ qlcnic_gb_set_gb2_mask(gb_val);
+ qlcnic_gb_set_gb3_mask(gb_val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
+ dev_info(&adapter->pdev->dev, "Pause control frames disabled"
+ " on all ports\n");
+ adapter->need_fw_reset = 1;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
+ __func__);
+ qlcnic_api_unlock(adapter);
+
+ return;
+ }
+
+ if (state == QLCNIC_DEV_READY) {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_NEED_RESET);
+ adapter->flags |= QLCNIC_FW_RESET_OWNER;
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
+/* Transition from the NPAR NOT READY to the NPAR READY state */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_OPER);
+ QLCDB(adapter, DRV, "NPAR operational state set\n");
+
+ qlcnic_api_unlock(adapter);
+}
+
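+/*
+ * (Re)arm the shared delayed work item with a new handler; scheduling
+ * is suppressed while AER recovery is in progress.
+ */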
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ if (test_bit(__QLCNIC_AER, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
+}
+
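+/*
+ * Re-attach after recovery: non-management functions first wait for the
+ * NPAR to become operational (bounded by QLCNIC_DEV_NPAR_OPER_TIMEO),
+ * then the interface is brought back up and firmware polling resumes.
+ */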
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 npar_state;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
+ if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
+ qlcnic_clr_all_drv_state(adapter, 0);
+ else if (npar_state != QLCNIC_DEV_NPAR_OPER)
+ qlcnic_schedule_work(adapter, qlcnic_attach_work,
+ FW_POLL_DELAY);
+ else
+ goto attach;
+ QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
+ return;
+ }
+attach:
+ qlcnic_dcb_get_info(adapter->dcb);
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+done:
+ netif_device_attach(netdev);
+ adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
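+/*
+ * Periodic health check: monitors temperature, the IDC device state and
+ * the firmware heartbeat counter. A heartbeat stuck for FW_FAIL_THRESH
+ * polls is treated as a firmware hang; returns 1 when a detach/recovery
+ * cycle has been scheduled.
+ */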
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ u32 state = 0, heartbeat;
+ u32 peg_status;
+ int err = 0;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset)
+ qlcnic_dev_request_reset(adapter, 0);
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET) {
+ qlcnic_set_npar_non_operational(adapter);
+ adapter->need_fw_reset = 1;
+ } else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ goto detach;
+
+ heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ adapter->heartbeat = heartbeat;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+
+ if (ahw->reset_context && qlcnic_auto_fw_reset)
+ qlcnic_reset_hw_context(adapter);
+
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ adapter->flags |= QLCNIC_FW_HANG;
+
+ qlcnic_dev_request_reset(adapter, 0);
+
+ if (qlcnic_auto_fw_reset)
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ peg_status,
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
+ &adapter->state)) {
+
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+ adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ qlcnic_dump_fw(adapter);
+ }
+
+ return 1;
+}
+
+void qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
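+/*
+ * Returns 1 when every lower-numbered PCI function in this slot is in
+ * D3cold, i.e. this is the first function of the device to come up.
+ */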
+static int qlcnic_is_first_func(struct pci_dev *pdev)
+{
+ struct pci_dev *oth_pdev;
+ int val = pdev->devfn;
+
+ while (val-- > 0) {
+ oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
+ (pdev->bus), pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
+ if (!oth_pdev)
+ continue;
+
+ if (oth_pdev->current_state != PCI_D3cold) {
+ pci_dev_put(oth_pdev);
+ return 0;
+ }
+ pci_dev_put(oth_pdev);
+ }
+ return 1;
+}
+
+static int qlcnic_attach_func(struct pci_dev *pdev)
+{
+ int err, first_func;
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ pdev->error_state = pci_channel_io_normal;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ first_func = qlcnic_is_first_func(pdev);
+
+ if (qlcnic_api_lock(adapter))
+ return -EINVAL;
+
+ if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
+ adapter->need_fw_reset = 1;
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ }
+ qlcnic_api_unlock(adapter);
+
+ err = qlcnic_start_firmware(adapter);
+ if (err)
+ return err;
+
+ qlcnic_clr_drv_state(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = qlcnic_setup_intr(adapter);
+
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err) {
+ qlcnic_clr_all_drv_state(adapter, 1);
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ netif_device_attach(netdev);
+ return err;
+ }
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ done:
+ netif_device_attach(netdev);
+ return err;
+}
+
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ netif_device_detach(netdev);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+ qlcnic_teardown_intr(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+{
+ return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+{
+ u32 state;
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+ &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_error_detected) {
+ return hw_ops->io_error_detected(pdev, state);
+ } else {
+ dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_slot_reset) {
+ return hw_ops->io_slot_reset(pdev);
+ } else {
+ dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_resume)
+ hw_ops->io_resume(pdev);
+ else
+ dev_err(&pdev->dev, "AER resume handler not registered.\n");
+}
+
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_check_npar_opertional(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ qlcnic_check_options(adapter);
+
+ err = qlcnic_set_eswitch_port_config(adapter);
+ if (err)
+ return err;
+
+ adapter->need_fw_reset = 0;
+
+ return err;
+}
+
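+/*
+ * Sanity-check a user-requested ring count: it must be a power of two,
+ * must not exceed the number of online CPUs, and multiple Tx queues are
+ * allowed on 82xx only when the firmware advertises multi-Tx support.
+ */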
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw_rings = 0;
+ char buf[8];
+ int cur_rings;
+
+ if (queue_type == QLCNIC_RX_QUEUE) {
+ max_hw_rings = adapter->max_sds_rings;
+ cur_rings = adapter->drv_sds_rings;
+ strcpy(buf, "SDS");
+ } else if (queue_type == QLCNIC_TX_QUEUE) {
+ max_hw_rings = adapter->max_tx_rings;
+ cur_rings = adapter->drv_tx_rings;
+ strcpy(buf, "Tx");
+ }
+
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
+ return -EINVAL;
+ }
+
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
+ }
+
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+ "%s value[%u] should not be higher than, number of online CPUs\n",
+ buf, num_online_cpus());
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 tx_rings, rx_rings;
+ int err;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ tx_rings = adapter->drv_tss_rings;
+ rx_rings = adapter->drv_rss_rings;
+
+ netif_device_detach(netdev);
+
+ err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+ if (err)
+ goto done;
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_enable_mbx_poll(adapter);
+ }
+
+ qlcnic_teardown_intr(adapter);
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ /* Check if we need to update real_num_{tx|rx}_queues because
+ * qlcnic_setup_intr() may change Tx/Rx rings size
+ */
+ if ((tx_rings != adapter->drv_tx_rings) ||
+ (rx_rings != adapter->drv_sds_rings)) {
+ err = qlcnic_set_real_num_queues(adapter,
+ adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ goto done;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ goto done;
+ }
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static void
+qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
+ struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+}
+
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev;
+ u16 vid;
+
+ qlcnic_config_indev_addr(adapter, netdev, event);
+
+ rcu_read_lock();
+ for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
+ dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
+ if (!dev)
+ continue;
+ qlcnic_config_indev_addr(adapter, dev, event);
+ }
+ rcu_read_unlock();
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto done;
+
+ qlcnic_config_indev_addr(adapter, dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+static const struct pci_error_handlers qlcnic_err_handler = {
+ .error_detected = qlcnic_io_error_detected,
+ .slot_reset = qlcnic_io_slot_reset,
+ .resume = qlcnic_io_resume,
+};
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = qlcnic_remove,
+#ifdef CONFIG_PM
+ .suspend = qlcnic_suspend,
+ .resume = qlcnic_resume,
+#endif
+ .shutdown = qlcnic_shutdown,
+ .err_handler = &qlcnic_err_handler,
+#ifdef CONFIG_QLCNIC_SRIOV
+ .sriov_configure = qlcnic_pci_sriov_configure,
+#endif
+
+};
+
+static int __init qlcnic_init_module(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+ ret = pci_register_driver(&qlcnic_driver);
+ if (ret) {
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+ }
+
+ return ret;
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644
index 000000000..332bb8a3f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -0,0 +1,1414 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
+
+#include <net/ip.h>
+
+#define QLC_83XX_MINIDUMP_FLASH 0x520000
+#define QLC_83XX_OCM_INDEX 3
+#define QLC_83XX_PCI_INDEX 0
+#define QLC_83XX_DMA_ENGINE_INDEX 8
+
+static const u32 qlcnic_ms_read_data[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
+#define QLCNIC_DUMP_WCRB BIT_0
+#define QLCNIC_DUMP_RWCRB BIT_1
+#define QLCNIC_DUMP_ANDCRB BIT_2
+#define QLCNIC_DUMP_ORCRB BIT_3
+#define QLCNIC_DUMP_POLLCRB BIT_4
+#define QLCNIC_DUMP_RD_SAVE BIT_5
+#define QLCNIC_DUMP_WRT_SAVED BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
+#define QLCNIC_DUMP_SKIP BIT_7
+
+#define QLCNIC_DUMP_MASK_MAX 0xff
+
+struct qlcnic_pex_dma_descriptor {
+ u32 read_data_size;
+ u32 dma_desc_cmd;
+ u32 src_addr_low;
+ u32 src_addr_high;
+ u32 dma_bus_addr_low;
+ u32 dma_bus_addr_high;
+ u32 rsvd[6];
+} __packed;
+
+struct qlcnic_common_entry_hdr {
+ u32 type;
+ u32 offset;
+ u32 cap_size;
+#if defined(__LITTLE_ENDIAN)
+ u8 mask;
+ u8 rsvd[2];
+ u8 flags;
+#else
+ u8 flags;
+ u8 rsvd[2];
+ u8 mask;
+#endif
+} __packed;
+
+struct __crb {
+ u32 addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 stride;
+ u8 rsvd1[3];
+#else
+ u8 rsvd1[3];
+ u8 stride;
+#endif
+ u32 data_size;
+ u32 no_ops;
+ u32 rsvd2[4];
+} __packed;
+
+struct __ctrl {
+ u32 addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 stride;
+ u8 index_a;
+ u16 timeout;
+#else
+ u16 timeout;
+ u8 index_a;
+ u8 stride;
+#endif
+ u32 data_size;
+ u32 no_ops;
+#if defined(__LITTLE_ENDIAN)
+ u8 opcode;
+ u8 index_v;
+ u8 shl_val;
+ u8 shr_val;
+#else
+ u8 shr_val;
+ u8 shl_val;
+ u8 index_v;
+ u8 opcode;
+#endif
+ u32 val1;
+ u32 val2;
+ u32 val3;
+} __packed;
+
+struct __cache {
+ u32 addr;
+#if defined(__LITTLE_ENDIAN)
+ u16 stride;
+ u16 init_tag_val;
+#else
+ u16 init_tag_val;
+ u16 stride;
+#endif
+ u32 size;
+ u32 no_ops;
+ u32 ctrl_addr;
+ u32 ctrl_val;
+ u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 read_addr_stride;
+ u8 read_addr_num;
+ u8 rsvd1[2];
+#else
+ u8 rsvd1[2];
+ u8 read_addr_num;
+ u8 read_addr_stride;
+#endif
+} __packed;
+
+struct __ocm {
+ u8 rsvd[8];
+ u32 size;
+ u32 no_ops;
+ u8 rsvd1[8];
+ u32 read_addr;
+ u32 read_addr_stride;
+} __packed;
+
+struct __mem {
+ u32 desc_card_addr;
+ u32 dma_desc_cmd;
+ u32 start_dma_cmd;
+ u32 rsvd[3];
+ u32 addr;
+ u32 size;
+} __packed;
+
+struct __mux {
+ u32 addr;
+ u8 rsvd[4];
+ u32 size;
+ u32 no_ops;
+ u32 val;
+ u32 val_stride;
+ u32 read_addr;
+ u8 rsvd2[4];
+} __packed;
+
+struct __queue {
+ u32 sel_addr;
+#if defined(__LITTLE_ENDIAN)
+ u16 stride;
+ u8 rsvd[2];
+#else
+ u8 rsvd[2];
+ u16 stride;
+#endif
+ u32 size;
+ u32 no_ops;
+ u8 rsvd2[8];
+ u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u8 rsvd3[2];
+#else
+ u8 rsvd3[2];
+ u8 read_addr_cnt;
+ u8 read_addr_stride;
+#endif
+} __packed;
+
+struct __pollrd {
+ u32 sel_addr;
+ u32 read_addr;
+ u32 sel_val;
+#if defined(__LITTLE_ENDIAN)
+ u16 sel_val_stride;
+ u16 no_ops;
+#else
+ u16 no_ops;
+ u16 sel_val_stride;
+#endif
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 data_size;
+ u8 rsvd[4];
+} __packed;
+
+struct __mux2 {
+ u32 sel_addr1;
+ u32 sel_addr2;
+ u32 sel_val1;
+ u32 sel_val2;
+ u32 no_ops;
+ u32 sel_val_mask;
+ u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 sel_val_stride;
+ u8 data_size;
+ u8 rsvd[2];
+#else
+ u8 rsvd[2];
+ u8 data_size;
+ u8 sel_val_stride;
+#endif
+} __packed;
+
+struct __pollrdmwr {
+ u32 addr1;
+ u32 addr2;
+ u32 val1;
+ u32 val2;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 mod_mask;
+ u32 data_size;
+} __packed;
+
+struct qlcnic_dump_entry {
+ struct qlcnic_common_entry_hdr hdr;
+ union {
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ struct __pollrdmwr pollrdmwr;
+ struct __mux2 mux2;
+ struct __pollrd pollrd;
+ } region;
+} __packed;
+
+enum qlcnic_minidump_opcode {
+ QLCNIC_DUMP_NOP = 0,
+ QLCNIC_DUMP_READ_CRB = 1,
+ QLCNIC_DUMP_READ_MUX = 2,
+ QLCNIC_DUMP_QUEUE = 3,
+ QLCNIC_DUMP_BRD_CONFIG = 4,
+ QLCNIC_DUMP_READ_OCM = 6,
+ QLCNIC_DUMP_PEG_REG = 7,
+ QLCNIC_DUMP_L1_DTAG = 8,
+ QLCNIC_DUMP_L1_ITAG = 9,
+ QLCNIC_DUMP_L1_DATA = 11,
+ QLCNIC_DUMP_L1_INST = 12,
+ QLCNIC_DUMP_L2_DTAG = 21,
+ QLCNIC_DUMP_L2_ITAG = 22,
+ QLCNIC_DUMP_L2_DATA = 23,
+ QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_POLL_RD = 35,
+ QLCNIC_READ_MUX2 = 36,
+ QLCNIC_READ_POLLRDMWR = 37,
+ QLCNIC_DUMP_READ_ROM = 71,
+ QLCNIC_DUMP_READ_MEM = 72,
+ QLCNIC_DUMP_READ_CTRL = 98,
+ QLCNIC_DUMP_TLHDR = 99,
+ QLCNIC_DUMP_RDEND = 255
+};
+
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+ hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = tmpl_hdr;
+ hdr->drv_cap_mask = mask;
+}
+
+struct qlcnic_dump_operations {
+ enum qlcnic_minidump_opcode opcode;
+ u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+ __le32 *);
+};
+
+static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 addr, data;
+ struct __crb *crb = &entry->region.crb;
+
+ addr = crb->addr;
+
+ for (i = 0; i < crb->no_ops; i++) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += crb->stride;
+ }
+ return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ struct __ctrl *ctr = &entry->region.ctrl;
+ int i, k, timeout = 0;
+ u32 addr, data, temp;
+ u8 no_ops;
+
+ addr = ctr->addr;
+ no_ops = ctr->no_ops;
+
+ for (i = 0; i < no_ops; i++) {
+		for (k = 0; k < 8; k++) {
+ if (!(ctr->opcode & (1 << k)))
+ continue;
+ switch (1 << k) {
+ case QLCNIC_DUMP_WCRB:
+ qlcnic_ind_wr(adapter, addr, ctr->val1);
+ break;
+ case QLCNIC_DUMP_RWCRB:
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr, data);
+ break;
+ case QLCNIC_DUMP_ANDCRB:
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data & ctr->val2));
+ break;
+ case QLCNIC_DUMP_ORCRB:
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data | ctr->val3));
+ break;
+ case QLCNIC_DUMP_POLLCRB:
+ while (timeout <= ctr->timeout) {
+ data = qlcnic_ind_rd(adapter, addr);
+ if ((data & ctr->val2) == ctr->val1)
+ break;
+ usleep_range(1000, 2000);
+ timeout++;
+ }
+ if (timeout > ctr->timeout) {
+ dev_info(&adapter->pdev->dev,
+ "Timed out, aborting poll CRB\n");
+ return -EINVAL;
+ }
+ break;
+ case QLCNIC_DUMP_RD_SAVE:
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
+ break;
+ case QLCNIC_DUMP_WRT_SAVED:
+ temp = ctr->index_v;
+ if (temp)
+ data = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ else
+ data = ctr->val1;
+
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ qlcnic_ind_wr(adapter, addr, data);
+ break;
+ case QLCNIC_DUMP_MOD_SAVE_ST:
+ data = qlcnic_get_saved_state(adapter, hdr,
+ ctr->index_v);
+ data <<= ctr->shl_val;
+ data >>= ctr->shr_val;
+ if (ctr->val2)
+ data &= ctr->val2;
+ data |= ctr->val3;
+ data += ctr->val1;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode\n");
+ break;
+ }
+ }
+ addr += ctr->stride;
+ }
+ return 0;
+}
+
+static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int loop;
+ u32 val, data = 0;
+ struct __mux *mux = &entry->region.mux;
+
+ val = mux->val;
+ for (loop = 0; loop < mux->no_ops; loop++) {
+ qlcnic_ind_wr(adapter, mux->addr, val);
+ data = qlcnic_ind_rd(adapter, mux->read_addr);
+ *buffer++ = cpu_to_le32(val);
+ *buffer++ = cpu_to_le32(data);
+ val += mux->val_stride;
+ }
+ return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, loop;
+ u32 cnt, addr, data, que_id = 0;
+ struct __queue *que = &entry->region.que;
+
+ addr = que->read_addr;
+ cnt = que->read_addr_cnt;
+
+ for (loop = 0; loop < que->no_ops; loop++) {
+ qlcnic_ind_wr(adapter, que->sel_addr, que_id);
+ addr = que->read_addr;
+ for (i = 0; i < cnt; i++) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += que->read_addr_stride;
+ }
+ que_id += que->stride;
+ }
+ return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 data;
+ void __iomem *addr;
+ struct __ocm *ocm = &entry->region.ocm;
+
+ addr = adapter->ahw->pci_base0 + ocm->read_addr;
+ for (i = 0; i < ocm->no_ops; i++) {
+ data = readl(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += ocm->read_addr_stride;
+ }
+ return ocm->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, count = 0;
+ u32 fl_addr, size, val, lck_val, addr;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+lock_try:
+ lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ usleep_range(10000, 11000);
+ count++;
+ goto lock_try;
+ }
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ adapter->ahw->pci_func);
+ for (i = 0; i < size; i++) {
+ addr = fl_addr & 0xFFFF0000;
+ qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
+ addr = LSW(fl_addr) + FLASH_ROM_DATA;
+ val = qlcnic_ind_rd(adapter, addr);
+ fl_addr += 4;
+ *buffer++ = cpu_to_le32(val);
+ }
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ return rom->size;
+}
+
+static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ struct __cache *l1 = &entry->region.cache;
+
+ val = l1->init_tag_val;
+
+ for (i = 0; i < l1->no_ops; i++) {
+ qlcnic_ind_wr(adapter, l1->addr, val);
+ qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
+ addr = l1->read_addr;
+ cnt = l1->read_addr_num;
+ while (cnt) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += l1->read_addr_stride;
+ cnt--;
+ }
+ val += l1->stride;
+ }
+ return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ u8 poll_mask, poll_to, time_out = 0;
+ struct __cache *l2 = &entry->region.cache;
+
+ val = l2->init_tag_val;
+ poll_mask = LSB(MSW(l2->ctrl_val));
+ poll_to = MSB(MSW(l2->ctrl_val));
+
+ for (i = 0; i < l2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, l2->addr, val);
+ if (LSW(l2->ctrl_val))
+ qlcnic_ind_wr(adapter, l2->ctrl_addr,
+ LSW(l2->ctrl_val));
+ if (!poll_mask)
+ goto skip_poll;
+ do {
+ data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
+ if (!(data & poll_mask))
+ break;
+ usleep_range(1000, 2000);
+ time_out++;
+ } while (time_out <= poll_to);
+
+ if (time_out > poll_to) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return -EINVAL;
+ }
+skip_poll:
+ addr = l2->read_addr;
+ cnt = l2->read_addr_num;
+ while (cnt) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += l2->read_addr_stride;
+ cnt--;
+ }
+ val += l2->stride;
+ }
+ return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
+ struct __mem *mem, __le32 *buffer,
+ int *ret)
+{
+ u32 addr, data, test;
+ int i, reg_read;
+
+ reg_read = mem->size;
+ addr = mem->addr;
+	/* The size must be a multiple of 16 and the address 16-byte aligned */
+	if ((addr & 0xf) || (reg_read % 16)) {
+ dev_info(&adapter->pdev->dev,
+ "Unaligned memory addr:0x%x size:0x%x\n",
+ addr, reg_read);
+ *ret = -EINVAL;
+ return 0;
+ }
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ while (reg_read != 0) {
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
+
+ for (i = 0; i < MAX_CTL_CHECK; i++) {
+ test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+ if (!(test & TA_CTL_BUSY))
+ break;
+ }
+		if (i == MAX_CTL_CHECK) {
+			if (printk_ratelimit())
+				dev_err(&adapter->pdev->dev,
+					"failed to read through agent\n");
+			/* Fail the read even when the message is ratelimited */
+			*ret = -EIO;
+			goto out;
+		}
+ for (i = 0; i < 4; i++) {
+ data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
+ *buffer++ = cpu_to_le32(data);
+ }
+		addr += 16;
+		reg_read -= 16;
+ }
+out:
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return mem->size;
+}
+
+/* DMA register base address */
+#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
+
+/* DMA register offsets w.r.t base address */
+#define QLC_DMA_CMD_BUFF_ADDR_LOW 0
+#define QLC_DMA_CMD_BUFF_ADDR_HI 4
+#define QLC_DMA_CMD_STATUS_CTRL 8
+
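+/*
+ * Program the DMA engine selected in the template header with the
+ * descriptor address, start the command, and poll the status register
+ * (BIT_1 = busy) for up to 400 iterations before giving up.
+ */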
+static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
+ struct __mem *mem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 dma_no, dma_base_addr, temp_addr;
+ int i, ret, dma_sts;
+ void *tmpl_hdr;
+
+ tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
+ dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
+ if (ret)
+ return ret;
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
+ if (ret)
+ return ret;
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
+ if (ret)
+ return ret;
+
+ /* Wait for DMA to complete */
+ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+ for (i = 0; i < 400; i++) {
+ dma_sts = qlcnic_ind_rd(adapter, temp_addr);
+
+ if (dma_sts & BIT_1)
+ usleep_range(250, 500);
+ else
+ break;
+ }
+
+ if (i >= 400) {
+ dev_info(dev, "PEX DMA operation timed out");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
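+/*
+ * Read adapter memory through the PEX DMA engine: verify the engine is
+ * free (BIT_31 of its status register), then repeatedly write a DMA
+ * descriptor into MS memory, start the transfer and copy each chunk
+ * (up to QLC_PEX_DMA_READ_SIZE bytes) out of the coherent DMA buffer.
+ */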
+static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
+ struct __mem *mem,
+ __le32 *buffer, int *ret)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 temp, dma_base_addr, size = 0, read_size = 0;
+ struct qlcnic_pex_dma_descriptor *dma_descr;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t dma_phys_addr;
+ void *dma_buffer;
+ void *tmpl_hdr;
+
+ tmpl_hdr = fw_dump->tmpl_hdr;
+
+ /* Check if DMA engine is available */
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
+ dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
+ temp = qlcnic_ind_rd(adapter,
+ dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
+
+ if (!(temp & BIT_31)) {
+ dev_info(dev, "%s: DMA engine is not available\n", __func__);
+ *ret = -EIO;
+ return 0;
+ }
+
+ /* Create DMA descriptor */
+ dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
+ GFP_KERNEL);
+ if (!dma_descr) {
+ *ret = -ENOMEM;
+ return 0;
+ }
+
+ /* dma_desc_cmd 0:15 = 0
+ * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
+ * dma_desc_cmd 20:23 = pci function number
+ * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
+ */
+ dma_phys_addr = fw_dump->phys_addr;
+ dma_buffer = fw_dump->dma_buffer;
+	temp = mem->dma_desc_cmd & 0xff0f;
+ temp |= (adapter->ahw->pci_func & 0xf) << 4;
+ dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
+ dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
+ dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
+ dma_descr->src_addr_high = 0;
+
+ /* Collect memory dump using multiple DMA operations if required */
+ while (read_size < mem->size) {
+ if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
+ size = QLC_PEX_DMA_READ_SIZE;
+ else
+ size = mem->size - read_size;
+
+ dma_descr->src_addr_low = mem->addr + read_size;
+ dma_descr->read_data_size = size;
+
+		/* Write DMA descriptor to MS memory */
+ temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
+ if (*ret) {
+ dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
+ mem->desc_card_addr);
+ goto free_dma_descr;
+ }
+
+ *ret = qlcnic_start_pex_dma(adapter, mem);
+ if (*ret) {
+ dev_info(dev, "Failed to start PEX DMA operation\n");
+ goto free_dma_descr;
+ }
+
+ memcpy(buffer, dma_buffer, size);
+ buffer += size / 4;
+ read_size += size;
+ }
+
+free_dma_descr:
+ kfree(dma_descr);
+
+ return read_size;
+}
+
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct device *dev = &adapter->pdev->dev;
+ struct __mem *mem = &entry->region.mem;
+ u32 data_size;
+ int ret = 0;
+
+ if (fw_dump->use_pex_dma) {
+ data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
+ &ret);
+ if (ret)
+ dev_info(dev,
+ "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
+ entry->hdr.mask);
+ else
+ return data_size;
+ }
+
+ data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
+ if (ret) {
+ dev_info(dev,
+ "Failed to read memory dump using test agent method: mask[0x%x]\n",
+ entry->hdr.mask);
+ return 0;
+ } else {
+ return data_size;
+ }
+}
+
+static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ return 0;
+}
+
+static int qlcnic_valid_dump_entry(struct device *dev,
+ struct qlcnic_dump_entry *entry, u32 size)
+{
+ int ret = 1;
+ if (size != entry->hdr.cap_size) {
+ dev_err(dev,
+ "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size,
+ entry->hdr.cap_size);
+ ret = 0;
+ }
+ return ret;
+}
+
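+/*
+ * POLLRDMWR entry: poll addr1 for poll_mask, read-modify-write addr2
+ * under mod_mask, re-arm addr1 with val2 and poll again, then record
+ * the (addr2, data) pair in the dump buffer.
+ */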
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry,
+ __le32 *buffer)
+{
+ struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+ u32 data, wait_count, poll_wait, temp;
+
+ poll_wait = poll->poll_wait;
+
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((data & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+ qlcnic_ind_wr(adapter, poll->addr2, data);
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ temp = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((temp & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ *buffer++ = cpu_to_le32(poll->addr2);
+ *buffer++ = cpu_to_le32(data);
+
+ return 2 * sizeof(u32);
+}
+
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __pollrd *pollrd = &entry->region.pollrd;
+ u32 data, wait_count, poll_wait, sel_val;
+ int i;
+
+ poll_wait = pollrd->poll_wait;
+ sel_val = pollrd->sel_val;
+
+ for (i = 0; i < pollrd->no_ops; i++) {
+ qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+ wait_count = 0;
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+ if ((data & pollrd->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+ *buffer++ = cpu_to_le32(sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val += pollrd->sel_val_stride;
+ }
+ return pollrd->no_ops * (2 * sizeof(u32));
+}
+
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __mux2 *mux2 = &entry->region.mux2;
+ u32 data;
+ u32 t_sel_val, sel_val1, sel_val2;
+ int i;
+
+ sel_val1 = mux2->sel_val1;
+ sel_val2 = mux2->sel_val2;
+
+ for (i = 0; i < mux2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val1 += mux2->sel_val_stride;
+ sel_val2 += mux2->sel_val_stride;
+ }
+
+ return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 fl_addr, size;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+
+ if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+ (u8 *)buffer, size))
+ return rom->size;
+
+ return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+ {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+ {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
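+/*
+ * Fold the 64-bit sum of all template words back into 32 bits and
+ * return its one's complement, so a valid template checksums to zero.
+ */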
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u8 *buffer, u32 size)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ buffer, size / sizeof(u32));
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(tmp_hdr) / sizeof(u32);
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ (u8 *)&tmp_hdr, size);
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ cmd->rsp.arg[2] = tmp_hdr.size;
+ cmd->rsp.arg[3] = tmp_hdr.version;
+
+ return ret;
+}
+
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ u32 *version, u32 *temp_size,
+ u8 *use_flash_temp)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+ return -ENOMEM;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+ qlcnic_free_mbx_args(&cmd);
+ return -EIO;
+ }
+ *use_flash_temp = 1;
+ }
+
+ *temp_size = cmd.rsp.arg[2];
+ *version = cmd.rsp.arg[3];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (!(*temp_size))
+ return -EIO;
+
+ return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u32 *buffer, u32 temp_size)
+{
+ int err = 0, i;
+ void *tmp_addr;
+ __le32 *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t tmp_addr_t = 0;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr)
+ return -ENOMEM;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ cmd.req.arg[1] = LSD(tmp_addr_t);
+ cmd.req.arg[2] = MSD(tmp_addr_t);
+ cmd.req.arg[3] = temp_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ tmp_buf = tmp_addr;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < temp_size / sizeof(u32); i++)
+ *buffer++ = __le32_to_cpu(*tmp_buf++);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+ return err;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_fw_dump *fw_dump;
+ u32 version, csum, *tmp_buf;
+ u8 use_flash_temp = 0;
+ u32 temp_size = 0;
+ void *temp_buffer;
+ int err;
+
+ ahw = adapter->ahw;
+ fw_dump = &ahw->fw_dump;
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+ &use_flash_temp);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
+ return -EIO;
+ }
+
+ fw_dump->tmpl_hdr = vzalloc(temp_size);
+ if (!fw_dump->tmpl_hdr)
+ return -ENOMEM;
+
+ tmp_buf = (u32 *)fw_dump->tmpl_hdr;
+ if (use_flash_temp)
+ goto flash_temp;
+
+ err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+ if (err) {
+flash_temp:
+ err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+ temp_size);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get minidump template header %d\n",
+ err);
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ return -EIO;
+ }
+ }
+
+ csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
+
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ return -EIO;
+ }
+
+ qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
+ if (fw_dump->use_pex_dma) {
+ fw_dump->dma_buffer = NULL;
+ temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ QLC_PEX_DMA_READ_SIZE,
+ &fw_dump->phys_addr,
+ GFP_KERNEL);
+ if (!temp_buffer)
+ fw_dump->use_pex_dma = false;
+ else
+ fw_dump->dma_buffer = temp_buffer;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "Default minidump capture mask 0x%x\n",
+ fw_dump->cap_mask);
+
+ qlcnic_enable_fw_dump_state(adapter);
+
+ return 0;
+}
+
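+/*
+ * Walk the minidump template: every entry selected by the capture mask
+ * is dispatched to its opcode handler (82xx or 83xx table) and the
+ * captured data appended to the dump buffer; unknown or invalid entries
+ * are flagged with QLCNIC_DUMP_SKIP. A udev event announces the
+ * completed dump.
+ */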
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ int i, k, ops_cnt, ops_index, dump_size = 0;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_entry *entry;
+ void *tmpl_hdr;
+ u32 ocm_window;
+ __le32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
+
+ ahw = adapter->ahw;
+ tmpl_hdr = fw_dump->tmpl_hdr;
+
+ /* Return if we don't have firmware dump template header */
+ if (!tmpl_hdr)
+ return -EIO;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+ return -EIO;
+ }
+
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not capturing dump\n");
+ return -EIO;
+ }
+
+ netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
+ /* Calculate the size for dump data area only */
+ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+ if (i & fw_dump->cap_mask)
+ dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
+ if (!dump_size)
+ return -EIO;
+
+ fw_dump->data = vzalloc(dump_size);
+ if (!fw_dump->data)
+ return -ENOMEM;
+
+ buffer = fw_dump->data;
+ fw_dump->size = dump_size;
+ no_entries = fw_dump->num_entries;
+ entry_offset = fw_dump->offset;
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
+
+ if (qlcnic_82xx_check(adapter)) {
+ ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+ fw_dump_ops = qlcnic_fw_dump_ops;
+ } else {
+ hdr_83xx = tmpl_hdr;
+ ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+ fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+ ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+ hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ }
+
+ for (i = 0; i < no_entries; i++) {
+ entry = tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & fw_dump->cap_mask)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
+ /* Find the handler for this entry */
+ ops_index = 0;
+ while (ops_index < ops_cnt) {
+ if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+ break;
+ ops_index++;
+ }
+
+ if (ops_index == ops_cnt) {
+ dev_info(dev, "Skipping unknown entry opcode %d\n",
+ entry->hdr.type);
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
+ /* Collect dump for this entry */
+ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+ if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
+ buf_offset += entry->hdr.cap_size;
+ entry_offset += entry->hdr.offset;
+ buffer = fw_dump->data + buf_offset;
+ }
+
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+ netdev_info(adapter->netdev,
+ "Dump data %d bytes captured, template header size %d bytes\n",
+ fw_dump->size, fw_dump->tmpl_hdr_size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
+ return 0;
+}
+
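+/*
+ * Re-fetch the minidump template when none is cached or the firmware
+ * version has increased across an initialization.
+ */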
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+ u32 prev_version, current_version;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ struct pci_dev *pdev = adapter->pdev;
+
+ prev_version = adapter->fw_version;
+ current_version = qlcnic_83xx_get_fw_version(adapter);
+
+ if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
+ }
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 000000000..4677b2edc
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,276 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_83XX_SRIOV_H_
+#define _QLCNIC_83XX_SRIOV_H_
+
+#include "qlcnic.h"
+#include <linux/types.h>
+#include <linux/pci.h>
+
+extern const u32 qlcnic_83xx_reg_tbl[];
+extern const u32 qlcnic_83xx_ext_reg_tbl[];
+
+struct qlcnic_bc_payload {
+ u64 payload[126];
+};
+
+struct qlcnic_bc_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u8 version;
+ u8 msg_type:4;
+ u8 rsvd1:3;
+ u8 op_type:1;
+ u8 num_cmds;
+ u8 num_frags;
+ u8 frag_num;
+ u8 cmd_op;
+ u16 seq_id;
+ u64 rsvd3;
+#elif defined(__BIG_ENDIAN)
+ u8 num_frags;
+ u8 num_cmds;
+ u8 op_type:1;
+ u8 rsvd1:3;
+ u8 msg_type:4;
+ u8 version;
+ u16 seq_id;
+ u8 cmd_op;
+ u8 frag_num;
+ u64 rsvd3;
+#endif
+};
+
+enum qlcnic_bc_commands {
+ QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
+ QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
+ QLCNIC_BC_CMD_GET_ACL = 0x2,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
+};
+
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
+#define QLC_BC_CMD 1
+
+struct qlcnic_trans_list {
+ /* Lock for manipulating list */
+ spinlock_t lock;
+ struct list_head wait_list;
+ int count;
+};
+
+enum qlcnic_trans_state {
+ QLC_INIT = 0,
+ QLC_WAIT_FOR_CHANNEL_FREE,
+ QLC_WAIT_FOR_RESP,
+ QLC_ABORT,
+ QLC_END,
+};
+
+struct qlcnic_bc_trans {
+ u8 func_id;
+ u8 active;
+ u8 curr_rsp_frag;
+ u8 curr_req_frag;
+ u16 cmd_id;
+ u16 req_pay_size;
+ u16 rsp_pay_size;
+ u32 trans_id;
+ enum qlcnic_trans_state trans_state;
+ struct list_head list;
+ struct qlcnic_bc_hdr *req_hdr;
+ struct qlcnic_bc_hdr *rsp_hdr;
+ struct qlcnic_bc_payload *req_pay;
+ struct qlcnic_bc_payload *rsp_pay;
+ struct completion resp_cmpl;
+ struct qlcnic_vf_info *vf;
+};
+
+enum qlcnic_vf_state {
+ QLC_BC_VF_SEND = 0,
+ QLC_BC_VF_RECV,
+ QLC_BC_VF_CHANNEL,
+ QLC_BC_VF_STATE,
+ QLC_BC_VF_FLR,
+ QLC_BC_VF_SOFT_FLR,
+};
+
+enum qlcnic_vlan_mode {
+ QLC_NO_VLAN_MODE = 0,
+ QLC_PVID_MODE,
+ QLC_GUEST_VLAN_MODE,
+};
+
+struct qlcnic_resources {
+ u16 num_tx_mac_filters;
+ u16 num_rx_ucast_mac_filters;
+ u16 num_rx_mcast_mac_filters;
+
+ u16 num_txvlan_keys;
+
+ u16 num_rx_queues;
+ u16 num_tx_queues;
+
+ u16 num_rx_buf_rings;
+ u16 num_rx_status_rings;
+
+ u16 num_destip;
+ u32 num_lro_flows_supported;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_vport {
+ u16 handle;
+ u16 max_tx_bw;
+ u16 min_tx_bw;
+ u16 pvid;
+ u8 vlan_mode;
+ u8 qos;
+ bool spoofchk;
+ u8 mac[6];
+};
+
+struct qlcnic_vf_info {
+ u8 pci_func;
+ u16 rx_ctx_id;
+ u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
+ unsigned long state;
+ struct completion ch_free_cmpl;
+ struct work_struct trans_work;
+ struct work_struct flr_work;
+ /* It synchronizes commands sent from VF */
+ struct mutex send_cmd_lock;
+ struct qlcnic_bc_trans *send_cmd;
+ struct qlcnic_bc_trans *flr_trans;
+ struct qlcnic_trans_list rcv_act;
+ struct qlcnic_trans_list rcv_pend;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_vport *vp;
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
+};
+
+struct qlcnic_async_work_list {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+ struct qlcnic_cmd_args *cmd;
+};
+
+struct qlcnic_back_channel {
+ u16 trans_counter;
+ struct workqueue_struct *bc_trans_wq;
+ struct workqueue_struct *bc_async_wq;
+ struct workqueue_struct *bc_flr_wq;
+ struct list_head async_list;
+};
+
+struct qlcnic_sriov {
+ u16 vp_handle;
+ u8 num_vfs;
+ u8 any_vlan;
+ u8 vlan_mode;
+ u16 num_allowed_vlans;
+ u16 *allowed_vlans;
+ u16 vlan;
+ struct qlcnic_resources ff_max;
+ struct qlcnic_back_channel bc;
+ struct qlcnic_vf_info *vf_info;
+};
+
+int qlcnic_sriov_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *);
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
+ struct qlcnic_bc_trans *);
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u16);
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+
+static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
+}
+
+#ifdef CONFIG_QLCNIC_SRIOV
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_cmd_args *);
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
+int qlcnic_pci_sriov_configure(struct pci_dev *, int);
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *);
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_vf_info *);
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
+int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
+int qlcnic_sriov_get_vf_config(struct net_device *, int,
+ struct ifla_vf_info *);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
+#else
+static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
+static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
+static inline void
+qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf) {}
+static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{ return false; }
+static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {}
+static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 000000000..e6312465f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,2214 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_83xx_hw.h"
+#include <linux/types.h>
+
+#define QLC_BC_COMMAND 0
+#define QLC_BC_RESPONSE 1
+
+#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
+#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
+
+#define QLC_BC_MSG 0
+#define QLC_BC_CFREE 1
+#define QLC_BC_FLR 2
+#define QLC_BC_HDR_SZ 16
+#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
+
+#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
+#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
+
+#define QLC_83XX_VF_RESET_FAIL_THRESH 8
+#define QLC_BC_CMD_MAX_RETRY_CNT 5
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+
+static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_sriov_issue_cmd,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .free_mac_list = qlcnic_sriov_vf_free_mac_list,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+};
+
+static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .shutdown = qlcnic_sriov_vf_shutdown,
+ .resume = qlcnic_sriov_vf_resume,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
+ {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
+ {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+ {QLCNIC_BC_CMD_GET_ACL, 3, 14},
+ {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
+};
+
+static inline bool qlcnic_sriov_bc_msg_check(u32 val)
+{
+ return (val & (1 << QLC_BC_MSG)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_channel_free_check(u32 val)
+{
+ return (val & (1 << QLC_BC_CFREE)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_flr_check(u32 val)
+{
+ return (val & (1 << QLC_BC_FLR)) ? true : false;
+}
+
+static inline u8 qlcnic_sriov_target_func_id(u32 val)
+{
+ return (val >> 4) & 0xff;
+}
+
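+/*
+ * Compute a VF's PCI routing ID from the PF's SR-IOV capability, per the
+ * PCIe SR-IOV spec: PF devfn + VF offset + VF stride * vf_id. Not
+ * applicable on a VF, where 0 is returned.
+ */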
+static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int pos;
+ u16 stride, offset;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
+ return (dev->devfn + offset + stride * vf_id) & 0xff;
+}
+
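+/*
+ * Allocate the SR-IOV context: per-VF info, the "bc-trans" and "async"
+ * back-channel workqueues and, on the PF, one vport per VF seeded with a
+ * random MAC address. The VF path calls this with num_vfs = 1 (see
+ * qlcnic_sriov_setup_vf).
+ */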
+int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct qlcnic_sriov *sriov;
+ struct qlcnic_back_channel *bc;
+ struct workqueue_struct *wq;
+ struct qlcnic_vport *vp;
+ struct qlcnic_vf_info *vf;
+ int err, i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return -EIO;
+
+ sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
+ if (!sriov)
+ return -ENOMEM;
+
+ adapter->ahw->sriov = sriov;
+ sriov->num_vfs = num_vfs;
+ bc = &sriov->bc;
+ sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
+ num_vfs, GFP_KERNEL);
+ if (!sriov->vf_info) {
+ err = -ENOMEM;
+ goto qlcnic_free_sriov;
+ }
+
+ wq = create_singlethread_workqueue("bc-trans");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Cannot create bc-trans workqueue\n");
+ goto qlcnic_free_vf_info;
+ }
+
+ bc->bc_trans_wq = wq;
+
+ wq = create_singlethread_workqueue("async");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+ goto qlcnic_destroy_trans_wq;
+ }
+
+ bc->bc_async_wq = wq;
+ INIT_LIST_HEAD(&bc->async_list);
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->adapter = adapter;
+ vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
+ mutex_init(&vf->send_cmd_lock);
+ spin_lock_init(&vf->vlan_list_lock);
+ INIT_LIST_HEAD(&vf->rcv_act.wait_list);
+ INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
+ spin_lock_init(&vf->rcv_act.lock);
+ spin_lock_init(&vf->rcv_pend.lock);
+ init_completion(&vf->ch_free_cmpl);
+
+ INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
+
+ if (qlcnic_sriov_pf_check(adapter)) {
+ vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
+ if (!vp) {
+ err = -ENOMEM;
+ goto qlcnic_destroy_async_wq;
+ }
+ sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ vp->max_tx_bw = MAX_BW;
+ vp->min_tx_bw = MIN_BW;
+ vp->spoofchk = false;
+ random_ether_addr(vp->mac);
+ dev_info(&adapter->pdev->dev,
+ "MAC Address %pM is configured for VF %d\n",
+ vp->mac, i);
+ }
+ }
+
+ return 0;
+
+qlcnic_destroy_async_wq:
+ destroy_workqueue(bc->bc_async_wq);
+
+qlcnic_destroy_trans_wq:
+ destroy_workqueue(bc->bc_trans_wq);
+
+qlcnic_free_vf_info:
+ kfree(sriov->vf_info);
+
+qlcnic_free_sriov:
+ kfree(adapter->ahw->sriov);
+ return err;
+}
+
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_cmd_args cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&t_list->lock, flags);
+
+ while (!list_empty(&t_list->wait_list)) {
+ trans = list_first_entry(&t_list->wait_list,
+ struct qlcnic_bc_trans, list);
+ list_del(&trans->list);
+ t_list->count--;
+ cmd.req.arg = (u32 *)trans->req_pay;
+ cmd.rsp.arg = (u32 *)trans->rsp_pay;
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+
+ spin_unlock_irqrestore(&t_list->lock, flags);
+}
+
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_cleanup_async_list(bc);
+ destroy_workqueue(bc->bc_async_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+ }
+
+ destroy_workqueue(bc->bc_trans_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ kfree(sriov->vf_info[i].vp);
+
+ kfree(sriov->vf_info);
+ kfree(adapter->ahw->sriov);
+}
+
+static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
+{
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+}
+
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_cleanup(adapter);
+}
+
+static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
+ u32 *pay, u8 pci_func, u8 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct qlcnic_cmd_args cmd;
+ unsigned long timeout;
+ int err;
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd.hdr = hdr;
+ cmd.pay = pay;
+ cmd.pay_size = size;
+ cmd.func_num = pci_func;
+ cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+ cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
+
+ err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
+ }
+
+ if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
+ }
+
+ return cmd.rsp_opcode;
+}
+
+static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
+{
+ adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u16 vport_id)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u32 status;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = vport_id << 16 | 0x1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get vport info, err=%d\n", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+
+ status = cmd.rsp.arg[2] & 0xffff;
+ if (status & BIT_0)
+ npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
+ if (status & BIT_1)
+ npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
+ if (status & BIT_2)
+ npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
+ if (status & BIT_3)
+ npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
+ if (status & BIT_4)
+ npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
+ if (status & BIT_5)
+ npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
+ if (status & BIT_6)
+ npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
+ if (status & BIT_7)
+ npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
+ if (status & BIT_8)
+ npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
+ if (status & BIT_9)
+ npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
+
+ npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
+ npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
+
+ dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
+ npar_info->min_tx_bw, npar_info->max_tx_bw,
+ npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ return 0;
+}
+
+static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vlans;
+ u16 *vlans;
+
+ if (sriov->allowed_vlans)
+ return 0;
+
+ sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
+ if (!sriov->any_vlan)
+ return 0;
+
+ num_vlans = sriov->num_allowed_vlans;
+ sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
+ if (!sriov->allowed_vlans)
+ return -ENOMEM;
+
+ vlans = (u16 *)&cmd->rsp.arg[3];
+ for (i = 0; i < num_vlans; i++)
+ sriov->allowed_vlans[i] = vlans[i];
+
+ return 0;
+}
+
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
+ ret);
+ } else {
+ sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
+ switch (sriov->vlan_mode) {
+ case QLC_GUEST_VLAN_MODE:
+ ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
+ break;
+ case QLC_PVID_MODE:
+ ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+ break;
+ }
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info;
+ int err;
+
+ err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
+ if (err)
+ return err;
+
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return -EIO;
+
+ qlcnic_sriov_vf_cfg_buff_desc(adapter);
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->op_mode = nic_info.op_mode;
+ ahw->capabilities = nic_info.capabilities;
+ return 0;
+}
+
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
+ int pci_using_dac)
+{
+ int err;
+
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
+ INIT_LIST_HEAD(&adapter->vf_mc_list);
+ if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
+ dev_warn(&adapter->pdev->dev,
+ "Device does not support MSI interrupts\n");
+
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+
+ err = qlcnic_sriov_init(adapter, 1);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto err_out_cleanup_sriov;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_disable_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+ if (err)
+ goto err_out_send_channel_term;
+
+ pci_set_drvdata(adapter->pdev, adapter);
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ adapter->ahw->idc.delay);
+ return 0;
+
+err_out_send_channel_term:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_disable_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+
+err_out_cleanup_sriov:
+ __qlcnic_sriov_cleanup(adapter);
+
+err_out_disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ return err;
+}
+
+static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ do {
+ msleep(20);
+ if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
+ return -EIO;
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ } while (state != QLC_83XX_IDC_DEV_READY);
+
+ return 0;
+}
+
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->reset_context = 0;
+ adapter->fw_fail_cnt = 0;
+ ahw->msix_supported = 1;
+ adapter->need_fw_reset = 0;
+ adapter->flags |= QLCNIC_TX_INTR_SHARED;
+
+ err = qlcnic_sriov_check_dev_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+ if (err)
+ return err;
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged SRIOV function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_sriov_vf_ops;
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
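+/*
+ * Return the size of fragment curr_frag (0-based) of a real_pay_size
+ * byte payload: QLC_BC_PAYLOAD_SZ for every full fragment, and the
+ * remainder for the final partial fragment.
+ */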
+static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
+{
+ u32 pay_size;
+
+ pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
+
+ if (pay_size)
+ pay_size = QLC_BC_PAYLOAD_SZ;
+ else
+ pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
+
+ return pay_size;
+}
+
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
+ u8 i;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
+ if (vf_info[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
+{
+ *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
+ if (!*trans)
+ return -ENOMEM;
+
+ init_completion(&(*trans)->resp_cmpl);
+ return 0;
+}
+
+static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
+ u32 size)
+{
+ *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
+ if (!*hdr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
+{
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+ int i, size;
+
+ mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
+
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_BC_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) |
+ (3 << 29));
+ mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
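+/*
+ * Build the fragment headers for one side of a transaction. For
+ * QLC_BC_COMMAND, request and response fragment arrays are allocated and
+ * the request headers are stamped; for QLC_BC_RESPONSE, the response
+ * headers allocated when the command arrived are stamped with the
+ * original seq_id.
+ */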
+static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd,
+ u16 seq, u8 msg_type)
+{
+ struct qlcnic_bc_hdr *hdr;
+ int i;
+ u32 num_regs, bc_pay_sz;
+ u16 remainder;
+ u8 cmd_op, num_frags, t_num_frags;
+
+ bc_pay_sz = QLC_BC_PAYLOAD_SZ;
+ if (msg_type == QLC_BC_COMMAND) {
+ trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
+ trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
+ num_regs = cmd->req.num;
+ trans->req_pay_size = (num_regs * 4);
+ num_regs = cmd->rsp.num;
+ trans->rsp_pay_size = (num_regs * 4);
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->req_pay_size) % (bc_pay_sz);
+ num_frags = (trans->req_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ t_num_frags = num_frags;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
+ return -ENOMEM;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
+ return -ENOMEM;
+ num_frags = t_num_frags;
+ hdr = trans->req_hdr;
+ } else {
+ cmd->req.arg = (u32 *)trans->req_pay;
+ cmd->rsp.arg = (u32 *)trans->rsp_pay;
+ cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ cmd->req.num = trans->req_pay_size / 4;
+ cmd->rsp.num = trans->rsp_pay_size / 4;
+ hdr = trans->rsp_hdr;
+ cmd->op_type = trans->req_hdr->op_type;
+ }
+
+ trans->trans_id = seq;
+ trans->cmd_id = cmd_op;
+ for (i = 0; i < num_frags; i++) {
+ hdr[i].version = 2;
+ hdr[i].msg_type = msg_type;
+ hdr[i].op_type = cmd->op_type;
+ hdr[i].num_cmds = 1;
+ hdr[i].num_frags = num_frags;
+ hdr[i].frag_num = i + 1;
+ hdr[i].cmd_op = cmd_op;
+ hdr[i].seq_id = seq;
+ }
+ return 0;
+}
+
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
+{
+ if (!trans)
+ return;
+ kfree(trans->req_hdr);
+ kfree(trans->rsp_hdr);
+ kfree(trans);
+}
+
+static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_trans_list *t_list;
+ unsigned long flags;
+ int ret = 0;
+
+ if (type == QLC_BC_RESPONSE) {
+ t_list = &vf->rcv_act;
+ spin_lock_irqsave(&t_list->lock, flags);
+ t_list->count--;
+ list_del(&trans->list);
+ if (t_list->count > 0)
+ ret = 1;
+ spin_unlock_irqrestore(&t_list->lock, flags);
+ }
+ if (type == QLC_BC_COMMAND) {
+ while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ msleep(100);
+ vf->send_cmd = NULL;
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+ }
+ return ret;
+}
+
+static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ return;
+
+ queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
+}
+
+static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
+{
+ struct completion *cmpl = &trans->resp_cmpl;
+
+ if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
+ trans->trans_state = QLC_END;
+ else
+ trans->trans_state = QLC_ABORT;
+}
+
+static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ if (type == QLC_BC_RESPONSE) {
+ trans->curr_rsp_frag++;
+ if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_END;
+ } else {
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag < trans->req_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_WAIT_FOR_RESP;
+ }
+}
+
+static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct completion *cmpl = &vf->ch_free_cmpl;
+
+ if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
+ trans->trans_state = QLC_ABORT;
+ return;
+ }
+
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ qlcnic_sriov_handle_multi_frags(trans, type);
+}
+
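+/*
+ * Copy an incoming back-channel fragment out of the firmware mailbox
+ * registers: header dwords first, then the payload, starting at mailbox
+ * register 2.
+ */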
+static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
+ u32 *hdr, u32 *pay, u32 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_mbx;
+ u8 i, max, hdr_size, j;
+
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ max = (size / sizeof(u32)) + hdr_size;
+
+ fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
+ for (; j < max; i++, j++)
+ *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
+}
+
+static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
+{
+ int ret = -EBUSY;
+ u32 timeout = 10000;
+
+ do {
+ if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
+ ret = 0;
+ break;
+ }
+ mdelay(1);
+ } while (--timeout);
+
+ return ret;
+}
+
+static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u32 pay_size, hdr_size;
+ u32 *hdr, *pay;
+ int ret;
+ u8 pci_func = trans->func_id;
+
+ if (__qlcnic_sriov_issue_bc_post(vf))
+ return -EBUSY;
+
+ if (type == QLC_BC_COMMAND) {
+ hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
+ pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ pay_size = (pay_size / sizeof(u32));
+ } else {
+ hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
+ pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ pay_size = (pay_size / sizeof(u32));
+ }
+
+ ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
+ pci_func, pay_size);
+ return ret;
+}
+
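+/*
+ * Drive the transaction state machine for one direction: post each
+ * fragment (QLC_INIT), wait for the firmware to signal the channel free
+ * (QLC_WAIT_FOR_CHANNEL_FREE) and, after the last request fragment, wait
+ * for the peer's response (QLC_WAIT_FOR_RESP). An FLR or a pending FW
+ * reset aborts the exchange.
+ */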
+static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf, u8 type)
+{
+ bool flag = true;
+ int err = -EIO;
+
+ while (flag) {
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ trans->trans_state = QLC_ABORT;
+
+ switch (trans->trans_state) {
+ case QLC_INIT:
+ trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
+ if (qlcnic_sriov_issue_bc_post(trans, type))
+ trans->trans_state = QLC_ABORT;
+ break;
+ case QLC_WAIT_FOR_CHANNEL_FREE:
+ qlcnic_sriov_wait_for_channel_free(trans, type);
+ break;
+ case QLC_WAIT_FOR_RESP:
+ qlcnic_sriov_wait_for_resp(trans);
+ break;
+ case QLC_END:
+ err = 0;
+ flag = false;
+ break;
+ case QLC_ABORT:
+ err = -EIO;
+ flag = false;
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ break;
+ default:
+ err = -EIO;
+ flag = false;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans, int pci_func)
+{
+ struct qlcnic_vf_info *vf;
+ int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return -EIO;
+
+ vf = &adapter->ahw->sriov->vf_info[index];
+ trans->vf = vf;
+ trans->func_id = pci_func;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ if (qlcnic_sriov_pf_check(adapter))
+ return -EIO;
+ if (qlcnic_sriov_vf_check(adapter) &&
+ trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return -EIO;
+ }
+
+ mutex_lock(&vf->send_cmd_lock);
+ vf->send_cmd = trans;
+ err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
+ qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
+ mutex_unlock(&vf->send_cmd_lock);
+ return err;
+}
+
+static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+#ifdef CONFIG_QLCNIC_SRIOV
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
+ return;
+ }
+#endif
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
+ trans_work);
+ struct qlcnic_bc_trans *trans = NULL;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u8 req;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (test_bit(QLC_BC_VF_FLR, &vf->state))
+ return;
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ trans = list_first_entry(&vf->rcv_act.wait_list,
+ struct qlcnic_bc_trans, list);
+ adapter = vf->adapter;
+
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
+ QLC_BC_RESPONSE))
+ goto cleanup_trans;
+
+ __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
+ trans->trans_state = QLC_INIT;
+ __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
+
+cleanup_trans:
+ qlcnic_free_mbx_args(&cmd);
+ req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
+ qlcnic_sriov_cleanup_transaction(trans);
+ if (req)
+ qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+}
+
+static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ u32 pay_size;
+
+ if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ return;
+
+ trans = vf->send_cmd;
+
+ if (trans == NULL)
+ goto clear_send;
+
+ if (trans->trans_id != hdr->seq_id)
+ goto clear_send;
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
+ (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
+ pay_size);
+ if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ goto clear_send;
+
+ complete(&trans->resp_cmpl);
+
+clear_send:
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+}
+
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ t_list->count++;
+ list_add_tail(&trans->list, &t_list->wait_list);
+ if (t_list->count == 1)
+ qlcnic_sriov_schedule_bc_cmd(sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+ return 0;
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ spin_lock(&t_list->lock);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock(&t_list->lock);
+ return 0;
+}
+
+static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_hdr *hdr)
+{
+ struct qlcnic_bc_trans *trans = NULL;
+ struct list_head *node;
+ u32 pay_size, curr_frag;
+ u8 found = 0, active = 0;
+
+ spin_lock(&vf->rcv_pend.lock);
+ if (vf->rcv_pend.count > 0) {
+ list_for_each(node, &vf->rcv_pend.wait_list) {
+ trans = list_entry(node, struct qlcnic_bc_trans, list);
+ if (trans->trans_id == hdr->seq_id) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ curr_frag = trans->curr_req_frag;
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ curr_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + curr_frag),
+ (u32 *)(trans->req_pay + curr_frag),
+ pay_size);
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag >= hdr->num_frags) {
+ vf->rcv_pend.count--;
+ list_del(&trans->list);
+ active = 1;
+ }
+ }
+ spin_unlock(&vf->rcv_pend.lock);
+
+ if (active)
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans))
+ qlcnic_sriov_cleanup_transaction(trans);
+}
+
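+/*
+ * Handle a command fragment received from the peer. The first fragment
+ * creates a transaction; multi-fragment commands wait on rcv_pend until
+ * all fragments have arrived, then the transaction moves to the active
+ * list and is processed from the bc-trans workqueue.
+ */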
+static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 pay_size;
+ int err;
+ u8 cmd_op;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
+ hdr->op_type != QLC_BC_CMD &&
+ hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return;
+
+ if (hdr->frag_num > 1) {
+ qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
+ return;
+ }
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd_op = hdr->cmd_op;
+ if (qlcnic_sriov_alloc_bc_trans(&trans))
+ return;
+
+ if (hdr->op_type == QLC_BC_CMD)
+ err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
+ else
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
+
+ if (err) {
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ cmd.op_type = hdr->op_type;
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
+ QLC_BC_COMMAND)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + trans->curr_req_frag),
+ (u32 *)(trans->req_pay + trans->curr_req_frag),
+ pay_size);
+ trans->func_id = vf->pci_func;
+ trans->vf = vf;
+ trans->trans_id = hdr->seq_id;
+ trans->curr_req_frag++;
+
+ if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
+ return;
+
+ if (trans->curr_req_frag == trans->req_hdr->num_frags) {
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+ } else {
+ spin_lock(&vf->rcv_pend.lock);
+ list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
+ vf->rcv_pend.count++;
+ spin_unlock(&vf->rcv_pend.lock);
+ }
+}
+
+static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr hdr;
+ u32 *ptr = (u32 *)&hdr;
+ u8 msg_type, i;
+
+ for (i = 2; i < 6; i++)
+ ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
+ msg_type = hdr.msg_type;
+
+ switch (msg_type) {
+ case QLC_BC_COMMAND:
+ qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
+ break;
+ case QLC_BC_RESPONSE:
+ qlcnic_sriov_handle_bc_resp(&hdr, vf);
+ break;
+ }
+}
+
+static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_handle_flr(sriov, vf);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Invalid event to VF. VF should not get FLR event\n");
+}
+
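+/*
+ * Demultiplex a back-channel mailbox event: bits 4-11 carry the source
+ * PCI function, while the QLC_BC_MSG, QLC_BC_CFREE and QLC_BC_FLR bits
+ * flag a message fragment, a channel-free notification and an FLR
+ * respectively.
+ */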
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
+{
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_sriov *sriov;
+ int index;
+ u8 pci_func;
+
+ sriov = adapter->ahw->sriov;
+ pci_func = qlcnic_sriov_target_func_id(event);
+ index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return;
+
+ vf = &sriov->vf_info[index];
+ vf->pci_func = pci_func;
+
+ if (qlcnic_sriov_channel_free_check(event))
+ complete(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_flr_check(event)) {
+ qlcnic_sriov_handle_flr_event(sriov, vf);
+ return;
+ }
+
+ if (qlcnic_sriov_bc_msg_check(event))
+ qlcnic_sriov_handle_msg_event(sriov, vf);
+}
+
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
+ return -ENOMEM;
+
+ if (enable)
+ cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+
+ err = qlcnic_83xx_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s bc events, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans)
+{
+ u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
+ u32 state;
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (state == QLC_83XX_IDC_DEV_READY) {
+ msleep(20);
+ clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
+ trans->trans_state = QLC_INIT;
+ if (++adapter->fw_fail_cnt > max)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return -EIO;
+}
+
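+/*
+ * Issue a mailbox command from a VF by tunnelling it through the back
+ * channel. A timeout marks the mailbox as not ready and requests a FW
+ * reset; CHANNEL_INIT is retried, since the PF may be slow to respond
+ * right after an adapter reset.
+ */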
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_bc_trans *trans;
+ int err;
+ u32 rsp_data, opcode, mbx_err_code, rsp;
+ u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+ u8 func = ahw->pci_func;
+
+ rsp = qlcnic_sriov_alloc_bc_trans(&trans);
+ if (rsp)
+ goto free_cmd;
+
+ rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
+ if (rsp)
+ goto cleanup_transaction;
+
+retry:
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ rsp = -EIO;
+ QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
+ QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
+ goto err_out;
+ }
+
+ err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
+ if (err) {
+ dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
+ (cmd->req.arg[0] & 0xffff), func);
+ rsp = QLCNIC_RCODE_TIMEOUT;
+
+ /* After an adapter reset the PF driver may take some time to
+ * respond to the VF's request. Retry the request until the
+ * maximum retry count is reached.
+ */
+ if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ !qlcnic_sriov_retry_bc_cmd(adapter, trans))
+ goto retry;
+
+ goto err_out;
+ }
+
+ rsp_data = cmd->rsp.arg[0];
+ mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
+ opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
+
+ if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
+ (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
+ }
+
+err_out:
+ if (rsp == QLCNIC_RCODE_TIMEOUT) {
+ ahw->reset_context = 1;
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ }
+
+cleanup_transaction:
+ qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ }
+
+ return rsp;
+}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
+ return -ENOMEM;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
+ ret);
+ goto out;
+ }
+
+ cmd_op = (cmd.rsp.arg[0] & 0xff);
+ if (cmd.rsp.arg[0] >> 25 == 2)
+ return 2;
+ if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
+ enum qlcnic_mac_type mac_type)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id,
+ mac_type);
+ }
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
+ }
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+ struct list_head *head = &bc->async_list;
+ struct qlcnic_async_work_list *entry;
+
+ flush_workqueue(bc->bc_async_wq);
+ while (!list_empty(head)) {
+ entry = list_entry(head->next, struct qlcnic_async_work_list,
+ list);
+ cancel_work_sync(&entry->work);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
+ if (!netdev_mc_empty(netdev)) {
+ qlcnic_flush_mcast_mac(adapter);
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr,
+ QLCNIC_MULTICAST_MAC);
+ }
+ }
+
+ /* Configure the unicast MAC addresses; if there is not enough
+ * space to store all of them, enable promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr,
+ QLCNIC_UNICAST_MAC);
+ }
+
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+ }
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
+{
+ struct qlcnic_async_work_list *entry;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args *cmd;
+
+ entry = container_of(work, struct qlcnic_async_work_list, work);
+ adapter = entry->ptr;
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
+static struct qlcnic_async_work_list *
+qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+{
+ struct list_head *node;
+ struct qlcnic_async_work_list *entry = NULL;
+ u8 empty = 0;
+
+ list_for_each(node, &bc->async_list) {
+ entry = list_entry(node, struct qlcnic_async_work_list, list);
+ if (!work_pending(&entry->work)) {
+ empty = 1;
+ break;
+ }
+ }
+
+ if (!empty) {
+ entry = kzalloc(sizeof(struct qlcnic_async_work_list),
+ GFP_ATOMIC);
+ if (entry == NULL)
+ return NULL;
+ list_add_tail(&entry->list, &bc->async_list);
+ }
+
+ return entry;
+}
+
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_async_work_list *entry = NULL;
+
+ entry = qlcnic_sriov_get_free_node_async_work(bc);
+ if (!entry)
+ return;
+
+ entry->ptr = data;
+ entry->cmd = cmd;
+ INIT_WORK(&entry->work, func);
+ queue_work(bc->bc_async_wq, &entry->work);
+}
+
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+ if (adapter->need_fw_reset)
+ return -EIO;
+
+ qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+ adapter, cmd);
+ return 0;
+}
+
+static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ adapter->need_fw_reset = 0;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_cleanup_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_term_channel;
+
+ return 0;
+
+err_out_term_channel:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_cleanup_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ return err;
+}
+
+static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (!qlcnic_up(adapter, netdev))
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
+ struct net_device *netdev = adapter->netdev;
+ u8 i, max_ints = ahw->num_msix - 1;
+
+ netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ for (i = 0; i < max_ints; i++) {
+ intr_tbl[i].id = i;
+ intr_tbl[i].enabled = 0;
+ intr_tbl[i].src = 0;
+ }
+ ahw->reset_context = 0;
+}
+
+static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev,
+ "%s: Reinitialization of VF 0x%x done after FW reset\n",
+ __func__, func);
+ } else {
+ dev_err(dev,
+ "%s: Reinitialization of VF 0x%x failed after FW reset\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "Current state 0x%x after FW reset\n",
+ state);
+ }
+ }
+
+ return 0;
+}
+
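+/*
+ * Context-reset handling on the VF: the first two attempts are turned
+ * into a FW health check, later attempts reset the context in place, and
+ * once QLC_83XX_VF_RESET_FAIL_THRESH is exceeded the interface is shut
+ * down for good.
+ */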
+static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ adapter->reset_ctx_cnt++;
+
+ /* Defer the context reset for the first two attempts and check
+ * whether the FW is in a failed state instead.
+ */
+ if (adapter->reset_ctx_cnt < 3) {
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ dev_info(dev,
+ "Resetting context, wait here to check if FW is in failed state\n");
+ return 0;
+ }
+
+ /* Check if the number of resets exceeds the threshold.
+ * If it does, just fail the VF.
+ */
+ if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ adapter->tx_timeo_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ qlcnic_sriov_vf_detach(adapter);
+ dev_err(dev,
+ "Device context resets have exceeded the threshold, device interface will be shutdown\n");
+ return -EIO;
+ }
+
+ dev_info(dev, "Resetting context of VF 0x%x\n", func);
+ dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
+ __func__, adapter->reset_ctx_cnt, func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ adapter->need_fw_reset = 0;
+
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev, "Done resetting context for VF 0x%x\n", func);
+ } else {
+ dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
+ ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
+ else if (ahw->reset_context)
+ ret = qlcnic_sriov_vf_handle_context_reset(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_err(&adapter->pdev->dev, "Device is in failed state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
+ qlcnic_sriov_vf_detach(adapter);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+}
+
+static int
+qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ u8 func = adapter->ahw->pci_func;
+
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware hang detected by VF 0x%x\n", func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
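+/*
+ * Periodic IDC poller for the VF: read the device state register,
+ * dispatch to the matching state handler and re-arm itself as long as
+ * the module stays loaded and no handler reported a fatal error.
+ */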
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlc_83xx_idc *idc;
+ int ret = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ idc = &adapter->ahw->idc;
+ idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ switch (idc->curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ ret = qlcnic_sriov_vf_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ case QLC_83XX_IDC_DEV_INIT:
+ ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ ret = qlcnic_sriov_vf_idc_failed_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ break;
+ default:
+ ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
+ }
+
+ idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
+ if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+}
+
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(20);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
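+/* Return 0 if @vlan_id is present in the VF's allowed-VLAN table,
+ * -EINVAL otherwise. The table is walked under vlan_list_lock.
+ */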
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
+ u8 allowed = 0;
+ int i;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
+ if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
+ return -EINVAL;
+
+ if (enable) {
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
+ return -EINVAL;
+
+ if (sriov->any_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (sriov->allowed_vlans[i] == vid)
+ allowed = 1;
+ }
+
+ if (!allowed)
+ return -EINVAL;
+ }
+ } else {
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+}
+
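+/* Configure a guest VLAN on a VF: validate the request, issue the
+ * CFG_GUEST_VLAN backchannel command (bit 0 = enable, bits 31:16 = VLAN
+ * ID) and, on success, flush and reprogram the MAC filter list for the
+ * new VLAN membership.
+ */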
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (vid == 0)
+ return 0;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = (enable & 1) | vid << 16;
+
+ qlcnic_sriov_cleanup_async_list(&sriov->bc);
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure guest VLAN, err=%d\n", ret);
+ } else {
+ netif_addr_lock_bh(netdev);
+ qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
+
+ if (enable)
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
+ else
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
+
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (!err) {
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (!err)
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+ return err;
+}
+
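+/* Allocate a per-VF table of num_allowed_vlans entries used to track
+ * the guest VLANs configured on each VF.
+ */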
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool ret = false;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ ret = true;
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return ret;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 000000000..a29538b86
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,2047 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include <linux/types.h>
+
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
+#define QLC_VF_MIN_TX_RATE 100
+#define QLC_VF_MAX_TX_RATE 9999
+#define QLC_MAC_OPCODE_MASK 0x7
+#define QLC_VF_FLOOD_BIT BIT_16
+#define QLC_FLOOD_MODE 0x5
+#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
+
+struct qlcnic_sriov_cmd_handler {
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+struct qlcnic_sriov_fw_cmd_handler {
+ u32 cmd;
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info,
+ u16 vport_id)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = (vport_id << 16) | 0x1;
+ cmd.req.arg[2] = npar_info->bit_offsets;
+ cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
+ cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
+ cmd.req.arg[4] = npar_info->max_tx_mac_filters;
+ cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
+ cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
+ (npar_info->max_rx_ip_addr << 16);
+ cmd.req.arg[6] = npar_info->max_rx_lro_flow |
+ (npar_info->max_rx_status_rings << 16);
+ cmd.req.arg[7] = npar_info->max_rx_buf_rings |
+ (npar_info->max_rx_ques << 16);
+ cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
+ cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
+ cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set vport info, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
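+/* Partition the firmware's fixed resource pool between the PF and its
+ * VFs: the PF keeps whatever remains after reserving per-VF MAC/VLAN
+ * filters, while each VF gets a fixed filter slice and a single Tx
+ * ring; the shared ring and flow resources are divided evenly across
+ * all functions.
+ */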
+static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u16 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_resources *res = &sriov->ff_max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
+ int vpid, id;
+ struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0)
+ return -EINVAL;
+
+ num_vfs = sriov->num_vfs;
+ max = num_vfs + 1;
+ info->bit_offsets = 0xffff;
+ info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
+
+ info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
+
+ if (adapter->ahw->pci_func == func) {
+ info->min_tx_bw = 0;
+ info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
+ } else {
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+ vp = sriov->vf_info[id].vp;
+ info->min_tx_bw = vp->min_tx_bw;
+ info->max_tx_bw = vp->max_tx_bw;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
+ }
+
+ info->max_rx_ip_addr = res->num_destip / max;
+ info->max_rx_status_rings = res->num_rx_status_rings / max;
+ info->max_rx_buf_rings = res->num_rx_buf_rings / max;
+ info->max_rx_ques = res->num_rx_queues / max;
+ info->max_rx_lro_flow = res->num_lro_flows_supported / max;
+ info->max_tx_vlan_keys = res->num_txvlan_keys;
+ info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
+ info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
+
+ return qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
+}
+
+static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
+
+ ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
+ ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
+ ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
+ ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
+ ff_max->num_rx_queues = info->max_rx_ques;
+ ff_max->num_tx_queues = info->max_tx_ques;
+ ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
+ ff_max->num_destip = info->max_rx_ip_addr;
+ ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
+ ff_max->num_rx_status_rings = info->max_rx_status_rings;
+ ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
+ ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
+}
+
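+/* Derive the per-VF guest VLAN allowance from the multicast MAC filter
+ * pool: divide the pool across (max MACs per function * number of
+ * functions) and reserve one entry, except on 83xx where only a single
+ * guest VLAN is allowed.
+ */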
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
+static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = 0x2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PF info, err=%d\n", err);
+ goto out;
+ }
+
+ npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
+ npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
+ npar_info->max_vports = MSW(cmd.rsp.arg[2]);
+ npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
+ npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
+ npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
+ npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
+ qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
+ dev_info(&adapter->pdev->dev,
+ "\n\ttotal_pf: %d,\n"
+ "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
+ npar_info->total_pf, npar_info->total_rss_engines,
+ npar_info->max_vports, npar_info->max_tx_ques,
+ npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = 0;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = 0;
+ }
+}
+
+static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
+ u16 vport_handle, u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = vport_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = vport_handle;
+ }
+}
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ return sriov->vp_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index >= 0) {
+ vf_info = &sriov->vf_info[index];
+ return vf_info->vp->handle;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
+ u8 flag, u16 func)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
+ return -ENOMEM;
+
+ if (flag) {
+ cmd.req.arg[3] = func << 8;
+ } else {
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
+ }
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed %s vport, err %d for func 0x%x\n",
+ (flag ? "enable" : "disable"), ret, func);
+ goto out;
+ }
+
+ if (flag) {
+ vpid = cmd.rsp.arg[2] & 0xffff;
+ qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
+ } else {
+ qlcnic_sriov_pf_reset_vport_handle(adapter, func);
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
+ u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x4;
+ if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ cmd.req.arg[1] |= BIT_16;
+ if (qlcnic_84xx_check(adapter))
+ cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VLAN filtering, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+/* When the VF flood bit is configured, the PF driver will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VF Flood bit on PF, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
+ u8 func, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
+ return -ENOMEM;
+
+ cmd.req.arg[0] |= (3 << 29);
+ cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
+ if (enable)
+ cmd.req.arg[1] |= BIT_0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to enable sriov eswitch%d\n", err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ cancel_work_sync(&sriov->vf_info[i].flr_work);
+
+ destroy_workqueue(bc->bc_flr_wq);
+}
+
+static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("qlcnic-flr");
+ if (wq == NULL) {
+ dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
+ return -ENOMEM;
+ }
+
+ bc->bc_flr_wq = wq;
+ return 0;
+}
+
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
+{
+ u8 func = adapter->ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
+{
+ if (!qlcnic_sriov_pf_check(adapter))
+ return;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ pci_disable_sriov(adapter->pdev);
+ netdev_info(adapter->netdev,
+ "SR-IOV is disabled successfully on port %d\n",
+ adapter->portnum);
+}
+
+static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (pci_vfs_assigned(adapter->pdev)) {
+ netdev_err(adapter->netdev,
+ "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+ adapter->portnum);
+ netdev_info(adapter->netdev,
+ "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+ adapter->portnum);
+ return -EPERM;
+ }
+
+ qlcnic_sriov_pf_disable(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_free_vlans(adapter);
+
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ /* After disabling SR-IOV, re-initialize the driver in default mode
+ * and configure the opmode based on the function's op_mode.
+ */
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ rtnl_unlock();
+ return -EIO;
+ }
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ return 0;
+}
+
+static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info, pf_info, vp_info;
+ int err;
+ u8 func = ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
+ if (err)
+ return err;
+
+ if (qlcnic_84xx_check(adapter)) {
+ err = qlcnic_sriov_pf_cfg_flood(adapter);
+ if (err)
+ goto disable_vlan_filtering;
+ }
+
+ err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
+ if (err)
+ goto disable_vlan_filtering;
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (err)
+ goto disable_eswitch;
+
+ err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto delete_vport;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->nic_mode = QLC_83XX_SRIOV_MODE;
+ return err;
+
+delete_vport:
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+
+disable_eswitch:
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+
+disable_vlan_filtering:
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err)
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ return err;
+}
+
+static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+ int num_vfs)
+{
+ int err = 0;
+
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_init(adapter, num_vfs);
+ if (err)
+ goto clear_op_mode;
+
+ err = qlcnic_sriov_pf_create_flr_queue(adapter);
+ if (err)
+ goto sriov_cleanup;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ goto del_flr_queue;
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
+ return err;
+
+del_flr_queue:
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+
+sriov_cleanup:
+ __qlcnic_sriov_cleanup(adapter);
+
+clear_op_mode:
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ return err;
+}
+
+static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(netdev,
+ "SR-IOV cannot be enabled, when legacy interrupts are enabled\n");
+ return -EIO;
+ }
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
+ if (err)
+ goto error;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
+ netdev_info(netdev,
+ "SR-IOV is enabled successfully on port %d\n",
+ adapter->portnum);
+ /* Return number of vfs enabled */
+ return num_vfs;
+ }
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
+ rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
+ return err;
+}
+
+int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (num_vfs == 0)
+ err = qlcnic_pci_sriov_disable(adapter);
+ else
+ err = qlcnic_pci_sriov_enable(adapter, num_vfs);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vport *vp;
+ int err, id;
+ u8 *mac;
+
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+
+ vp = adapter->ahw->sriov->vf_info[id].vp;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x3 | func << 16;
+ if (vp->spoofchk) {
+ mac = vp->mac;
+ cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
+ cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
+ mac[2] << 24;
+ cmd.req.arg[5] = mac[1] | mac[0] << 8;
+ }
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ cmd.req.arg[2] |= BIT_6;
+ cmd.req.arg[3] |= vp->pvid << 8;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
+ u16 func)
+{
+ struct qlcnic_info defvp_info;
+ int err;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
+ if (err)
+ return -EIO;
+
+ err = qlcnic_sriov_set_vf_acl(adapter, func);
+ if (err)
+ return err;
+
+ return 0;
+}
+
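+/* Handle CHANNEL_INIT/CHANNEL_TERM from a VF: create or destroy its
+ * vport and track the VF state bit. The completion code is returned in
+ * bits 26:25 of rsp.arg[0] (1 = success, 2 = failure).
+ */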
+static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
+ u16 func = vf->pci_func;
+ size_t size;
+ int err;
+
+ adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (!err) {
+ err = qlcnic_sriov_set_vf_vport_info(adapter, func);
+ if (err)
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+ } else {
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+
+ if (err)
+ goto err_out;
+
+ cmd->rsp.arg[0] |= (1 << 25);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+ return err;
+
+err_out:
+ cmd->rsp.arg[0] |= (2 << 25);
+ return err;
+}
+
+static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
+{
+ struct qlcnic_cmd_args *cmd;
+ struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
+ u8 *addr;
+ int err;
+ u32 *buf;
+ int vpid;
+
+ vp = vf->vp;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid < 0) {
+ err = -EINVAL;
+ goto free_args;
+ }
+
+ if (vlan)
+ op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
+
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+
+ addr = vp->mac;
+ mv.vlan = vlan;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd->req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ return err;
+
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
+ return err;
+}
+
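+/* The validate_* helpers below sanity-check a VF's mailbox request
+ * before it is passed through to firmware, e.g. that the header carries
+ * the expected 0x3 in bits 31:29 and that context IDs match the ones
+ * the PF handed out to this VF.
+ */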
+static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ spin_unlock_bh(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
+static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_rx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[6] = vf->vp->handle;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err) {
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
+ vf->rx_ctx_id = mbx_out->ctx_id;
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
+ } else {
+ vf->rx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u8 type, *mac;
+
+ type = cmd->req.arg[1];
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ cmd->rsp.arg[0] = (2 << 25);
+ break;
+ case QLCNIC_GET_CURRENT_MAC:
+ cmd->rsp.arg[0] = (1 << 25);
+ mac = vf->vp->mac;
+ cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
+ cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
+ ((mac[3]) << 16 & 0xff0000) |
+ ((mac[2]) << 24 & 0xff000000);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_tx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[5] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err) {
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
+ vf->tx_ctx_id = mbx_out->ctx_id;
+ } else {
+ vf->tx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->rx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->tx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_16))
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xff) != 0x1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (cmd->req.arg[2] > adapter->ahw->max_mtu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] & BIT_31) {
+ if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
+ return -EINVAL;
+ } else {
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
+ ctx_id = cmd->req.arg[1] >> 16;
+ pkts = cmd->req.arg[2] & 0xffff;
+ time = cmd->req.arg[2] >> 16;
+
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
+
+ return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vport *vp = vf->vp;
+ u8 op, new_op;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ cmd->req.arg[1] |= (vf->vp->handle << 16);
+ cmd->req.arg[1] |= BIT_31;
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ op = cmd->req.arg[1] & 0x7;
+ cmd->req.arg[1] &= ~0x7;
+ new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+ cmd->req.arg[3] |= vp->pvid << 16;
+ cmd->req.arg[1] |= new_op;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_linkevent(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ u8 cmd_op, mode = vp->vlan_mode;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
+
+ adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
+
+ cmd_op = trans->req_hdr->cmd_op;
+ cmd->rsp.arg[0] |= 1 << 25;
+
+ /* For an 84xx adapter in PVID mode, the PF driver should report the
+ * VLAN mode to the VF driver as QLC_NO_VLAN_MODE, which is zero in the
+ * mailbox response.
+ */
+ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+ return 0;
+
+ switch (mode) {
+ case QLC_GUEST_VLAN_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
+ break;
+ case QLC_PVID_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
+ break;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
+
+ if (!qlcnic_sriov_check_any_vlan(vf))
+ return -EINVAL;
+
+ vlan = cmd->req.arg[1] >> 16;
+ if (!vf->rx_ctx_id) {
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+ return 0;
+ }
+
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
+ return 0;
+}
+
+static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int err = -EIO;
+ u16 vlan;
+
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
+ return err;
+
+ vlan = cmd->req.arg[1] >> 16;
+
+ if (!vf->rx_ctx_id) {
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+ return 0;
+ }
+
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
+
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
+
+ if (err) {
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
+ }
+
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_vport *vp = vf->vp;
+ int err = -EIO;
+ u8 op;
+
+ if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
+ cmd->rsp.arg[0] |= 2 << 25;
+ return err;
+ }
+
+ op = cmd->req.arg[1] & 0xf;
+
+ if (op)
+ err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
+ else
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
+
+ cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
+ return err;
+}
+
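+/* Firmware commands a VF may issue that the PF forwards to firmware
+ * unchanged.
+ */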
+static const int qlcnic_pf_passthru_supp_cmds[] = {
+ QLCNIC_CMD_GET_STATISTICS,
+ QLCNIC_CMD_GET_PORT_CONFIG,
+ QLCNIC_CMD_GET_LINK_STATUS,
+ QLCNIC_CMD_INIT_NIC_FUNC,
+ QLCNIC_CMD_STOP_NIC_FUNC,
+};
+
+static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
+ [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
+ [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
+};
+
+static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
+ {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
+ {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
+ {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
+ {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
+ {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
+ {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
+ {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
+ {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
+ {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
+};
+
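+/* Dispatch a backchannel request from a VF: BC control commands are
+ * indexed directly by cmd_op, firmware commands are matched against the
+ * handler table, and whitelisted commands are passed through verbatim.
+ * Anything else is rejected with status code 9 in the upper bits of
+ * rsp.arg[0].
+ */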
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 size, cmd_op;
+
+ cmd_op = trans->req_hdr->cmd_op;
+
+ if (trans->req_hdr->op_type == QLC_BC_CMD) {
+ size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
+ if (cmd_op < size) {
+ qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
+ return;
+ }
+ } else {
+ int i;
+ size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
+ qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
+ return;
+ }
+ }
+
+ size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
+ qlcnic_issue_cmd(adapter, cmd);
+ return;
+ }
+ }
+ }
+
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid;
+}
+
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->rx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->rx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->tx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->tx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+ unsigned long flag;
+
+ spin_lock_irqsave(&t_list->lock, flag);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock_irqrestore(&t_list->lock, flag);
+ return 0;
+}
+
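+/* Core FLR processing: drain the VF's pending and active transaction
+ * lists, tear down its Tx/Rx contexts on a software FLR, delete the
+ * vport and, for a software FLR, requeue the triggering transaction.
+ */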
+static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_del_tx_ctx(adapter, vf);
+ qlcnic_sriov_del_rx_ctx(adapter, vf);
+ }
+
+ qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);
+
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
+ vf->flr_trans);
+ clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = NULL;
+ }
+}
+
+static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf;
+
+ vf = container_of(work, struct qlcnic_vf_info, flr_work);
+ __qlcnic_sriov_process_flr(vf);
+}
+
+static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
+ return;
+
+ INIT_WORK(&vf->flr_work, func);
+ queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
+}
+
+static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+
+ set_bit(QLC_BC_VF_FLR, &vf->state);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = trans;
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
+ vf->pci_func);
+}
+
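+/* A CHANNEL_INIT from a VF whose channel is already up means the VF
+ * driver was reloaded without a hardware FLR; treat it as a software
+ * FLR so the stale contexts are torn down first.
+ */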
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr *hdr = trans->req_hdr;
+
+ if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ (hdr->op_type == QLC_BC_CMD) &&
+ test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
+ return true;
+ }
+
+ return false;
+}
+
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct net_device *dev = vf->adapter->netdev;
+ struct qlcnic_vport *vp = vf->vp;
+
+ if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ return;
+ }
+
+ if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
+ netdev_info(dev, "FLR for PCI func %d in progress\n",
+ vf->pci_func);
+ return;
+ }
+
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
+
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
+}
+
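+/* On a PF-level reset, force-complete any outstanding FLR work for
+ * every VF, clear their context IDs and reset the PF vport handle.
+ */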
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_sriov *sriov = ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 num_vfs = sriov->num_vfs;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->rx_ctx_id = 0;
+ vf->tx_ctx_id = 0;
+ cancel_work_sync(&vf->flr_work);
+ __qlcnic_sriov_process_flr(vf);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ }
+
+ qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
+ QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
+}
+
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ return err;
+
+ dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
+ __func__, ahw->op_mode);
+ return err;
+}
+
+int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vfs;
+ struct qlcnic_vf_info *vf_info;
+ u8 *curr_mac;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ num_vfs = sriov->num_vfs;
+
+ if (!is_valid_ether_addr(mac) || vf >= num_vfs)
+ return -EINVAL;
+
+ if (ether_addr_equal(adapter->mac_addr, mac)) {
+ netdev_err(netdev, "MAC address is already in use by the PF\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_info = &sriov->vf_info[i];
+ if (ether_addr_equal(vf_info->vp->mac, mac)) {
+ netdev_err(netdev,
+ "MAC address is already in use by VF %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ curr_mac = vf_info->vp->mac;
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(curr_mac, mac, netdev->addr_len);
+ netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
+ mac, vf);
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_info nic_info;
+ struct qlcnic_vport *vp;
+ u16 vpid;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (!min_tx_rate)
+ min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+ if (max_tx_rate &&
+ (max_tx_rate > QLC_VF_MAX_TX_RATE || max_tx_rate < min_tx_rate)) {
+ netdev_err(netdev,
+ "Invalid max Tx rate, allowed range is [%d - %d]\n",
+ min_tx_rate, QLC_VF_MAX_TX_RATE);
+ return -EINVAL;
+ }
+
+ if (!max_tx_rate)
+ max_tx_rate = 10000;
+
+ if (min_tx_rate &&
+ (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ netdev_err(netdev,
+ "Invalid min Tx rate, allowed range is [%d - %d]",
+ QLC_VF_MIN_TX_RATE, max_tx_rate);
+ return -EINVAL;
+ }
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+
+ nic_info.max_tx_bw = max_tx_rate / 100;
+ nic_info.min_tx_bw = min_tx_rate / 100;
+ nic_info.bit_offsets = BIT_0;
+
+ if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+ }
+
+ vp->max_tx_bw = max_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ max_tx_rate, vp->max_tx_bw, vf);
+ vp->min_tx_bw = min_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ min_tx_rate, vp->min_tx_bw, vf);
+ return 0;
+}
+
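+/* ndo_set_vf_vlan: VLAN 4095 selects guest VLAN mode, VLAN 0 clears
+ * any VLAN configuration, and any other value configures a PVID.
+ */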
+int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs || qos > 7)
+ return -EINVAL;
+
+ if (vlan > MAX_VLAN_ID) {
+ netdev_err(netdev,
+ "Invalid VLAN ID, allowed range is [0 - %d]\n",
+ MAX_VLAN_ID);
+ return -EINVAL;
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
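+	/* 4095 selects guest VLAN mode, 0 clears any VLAN, and any other
+	 * value becomes the port VLAN ID (PVID) with the given QoS.
+	 */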
+ switch (vlan) {
+ case 4095:
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ break;
+ case 0:
+ vp->vlan_mode = QLC_NO_VLAN_MODE;
+ vp->qos = 0;
+ break;
+ default:
+ vp->vlan_mode = QLC_PVID_MODE;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
+ vp->qos = qos;
+ vp->pvid = vlan;
+ }
+
+ netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
+ vlan, qos, vf);
+ return 0;
+}
+
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
+{
+ __u32 vlan = 0;
+
+ switch (vp->vlan_mode) {
+ case QLC_PVID_MODE:
+ vlan = vp->pvid;
+ break;
+ case QLC_GUEST_VLAN_MODE:
+ vlan = MAX_VLAN_ID;
+ break;
+ case QLC_NO_VLAN_MODE:
+ vlan = 0;
+ break;
+ default:
+ netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+ vp->vlan_mode, vf);
+ }
+
+ return vlan;
+}
+
+int qlcnic_sriov_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vp = sriov->vf_info[vf].vp;
+ memcpy(&ivi->mac, vp->mac, ETH_ALEN);
+ ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
+ ivi->qos = vp->qos;
+ ivi->spoofchk = vp->spoofchk;
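+	/* MAX_BW/MIN_BW mean no rate was configured; report 0 so the
+	 * stack treats the rate as unset.
+	 */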
+ if (vp->max_tx_bw == MAX_BW)
+ ivi->max_tx_rate = 0;
+ else
+ ivi->max_tx_rate = vp->max_tx_bw * 100;
+ if (vp->min_tx_bw == MIN_BW)
+ ivi->min_tx_rate = 0;
+ else
+ ivi->min_tx_rate = vp->min_tx_bw * 100;
+
+ ivi->vf = vf;
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ vp->spoofchk = chk;
+ return 0;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644
index 000000000..59a721fba
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -0,0 +1,1438 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
+
+#define QLC_STATUS_UNSUPPORTED_CMD -2	/* seen by userspace as -ENOENT */
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static ssize_t qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto err_out;
+
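+	/* Base-2 parse: only "0" and "1" are accepted. */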
+ if (kstrtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static ssize_t qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (kstrtoul(buf, 2, &new))
+ return -EINVAL;
+
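+	/* Toggle the flag only when the requested state differs. */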
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
+ u8 *state, u8 *rate)
+{
+ *rate = LSB(beacon);
+ *state = MSB(beacon);
+
+ QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+ if (!*state) {
+ *rate = __QLCNIC_MAX_LED_RATE;
+ return 0;
+ } else if (*state > __QLCNIC_MAX_LED_STATE) {
+ return -EINVAL;
+ }
+
+ if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
+ const char *buf, size_t len)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ unsigned long h_beacon;
+ int err;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ if (kstrtoul(buf, 2, &h_beacon))
+ return -EINVAL;
+
+ qlcnic_get_beacon_state(adapter);
+
+ if (ahw->beacon_state == h_beacon)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+
+ if (h_beacon)
+ err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+ else
+ err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+ if (!err)
+ ahw->beacon_state = h_beacon;
+
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ rtnl_unlock();
+ return len;
+}
+
+static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
+ const char *buf, size_t len)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
+ u16 beacon;
+ u8 b_state, b_rate;
+
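+	/* The beacon word carries the LED state in its high byte and the
+	 * blink rate in its low byte.
+	 */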
+ if (len != sizeof(u16))
+ return QL_STATUS_INVALID_PARAM;
+
+ memcpy(&beacon, buf, sizeof(u16));
+ err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+ if (err)
+ return err;
+
+ qlcnic_get_beacon_state(adapter);
+
+ if (ahw->beacon_state == b_state)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+ if (err)
+ goto out;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ err = qlcnic_config_led(adapter, b_state, b_rate);
+ if (!err) {
+ err = len;
+ ahw->beacon_state = b_state;
+ }
+
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
+
+out:
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+ rtnl_unlock();
+
+ return err;
+}
+
+static ssize_t qlcnic_store_beacon(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(dev,
+ "LED test not supported in non privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (qlcnic_82xx_check(adapter))
+ err = qlcnic_82xx_store_beacon(adapter, buf, len);
+ else if (qlcnic_83xx_check(adapter))
+ err = qlcnic_83xx_store_beacon(adapter, buf, len);
+ else
+ return -EIO;
+
+ return err;
+}
+
+static ssize_t qlcnic_show_beacon(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
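+	/* Offsets below CRB space must fall in the CAMQM window, which is
+	 * accessed 8 bytes at a time.
+	 */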
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+	if ((size != crb_size) || (offset & (crb_size - 1)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+ qlcnic_read_crb(adapter, buf, offset, size);
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ qlcnic_write_crb(adapter, buf, offset, size);
+ return size;
+}
+
+static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
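+/* Map a PCI function number to its index in the NPAR table, or -EINVAL. */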
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
+ return -EINVAL;
+}
+
+static int validate_pm_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i, src_index, dest_index;
+
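+	/* Mirroring is only allowed between functions on the same
+	 * physical eSwitch port.
+	 */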
+ for (i = 0; i < count; i++) {
+ src_pci_func = pm_cfg[i].pci_func;
+ dest_pci_func = pm_cfg[i].dest_npar;
+ src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
+ if (src_index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+ if (dest_index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ s_esw_id = adapter->npars[src_index].phy_port;
+ d_esw_id = adapter->npars[dest_index].phy_port;
+
+ if (s_esw_id != d_esw_id)
+ return QL_STATUS_INVALID_PARAM;
+ }
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u32 id, action, pci_func;
+ int count, rem, i, ret, index;
+
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ rem = size % sizeof(struct qlcnic_pm_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+ ret = validate_pm_config(adapter, pm_cfg, count);
+
+ if (ret)
+ return ret;
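+
+	/* First pass: program port mirroring in firmware; the second pass
+	 * below caches the accepted settings in the NPAR table.
+	 */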
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ action = !!pm_cfg[i].action;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ id = adapter->npars[index].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+ id = adapter->npars[index].phy_port;
+ adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[index].dest_npar = id;
+ }
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u8 pci_func;
+ u32 count;
+ int i;
+
+ memset(buf, 0, size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+ pm_cfg[pci_func].dest_npar = 0;
+ pm_cfg[pci_func].pci_func = i;
+ }
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ return size;
+}
+
+static int validate_esw_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
+ u32 op_mode;
+ u8 pci_func;
+
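+	/* The driver operation mode is read from a CRB register on 82xx
+	 * parts and from a different register on 83xx parts.
+	 */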
+ if (qlcnic_82xx_check(adapter))
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ else
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ if (pci_func >= ahw->max_vnic_func)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ if (qlcnic_82xx_check(adapter)) {
+ ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+ } else {
+ ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ pci_func);
+ esw_cfg[i].offload_flags = 0;
+ }
+
+ if (ret != QLCNIC_NON_PRIV_FUNC) {
+ if (esw_cfg[i].mac_anti_spoof != 0)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].mac_override != 1)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].promisc_mode != 1)
+ return QL_STATUS_INVALID_PARAM;
+ }
+ break;
+ case QLCNIC_ADD_VLAN:
+ if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+ return QL_STATUS_INVALID_PARAM;
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ case QLCNIC_DEL_VLAN:
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ default:
+ return QL_STATUS_INVALID_PARAM;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ struct qlcnic_npar_info *npar;
+ int count, rem, i, ret;
+ int index;
+ u8 op_mode = 0, pci_func;
+
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ rem = size % sizeof(struct qlcnic_esw_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+ ret = validate_esw_config(adapter, esw_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+ continue;
+
+ op_mode = esw_cfg[i].op_mode;
+ qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+ esw_cfg[i].op_mode = op_mode;
+ esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ rtnl_lock();
+ qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+ rtnl_unlock();
+ break;
+ case QLCNIC_ADD_VLAN:
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_DEL_VLAN:
+ esw_cfg[i].vlan_id = 0;
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ }
+ }
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+ npar = &adapter->npars[index];
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ npar->promisc_mode = esw_cfg[i].promisc_mode;
+ npar->mac_override = esw_cfg[i].mac_override;
+ npar->offload_flags = esw_cfg[i].offload_flags;
+ npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+ npar->discard_tagged = esw_cfg[i].discard_tagged;
+ break;
+ case QLCNIC_ADD_VLAN:
+ npar->pvid = esw_cfg[i].vlan_id;
+ break;
+ case QLCNIC_DEL_VLAN:
+ npar->pvid = 0;
+ break;
+ }
+ }
+out:
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ u8 pci_func;
+ u32 count;
+ int i;
+
+ memset(buf, 0, size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ esw_cfg[pci_func].pci_func = pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ return size;
+}
+
+static int validate_npar_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_func_cfg *np_cfg,
+ int count)
+{
+ u8 pci_func, i;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+ !IS_VALID_BW(np_cfg[i].max_bw))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg *np_cfg;
+ int i, count, rem, ret, index;
+ u8 pci_func;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ rem = size % sizeof(struct qlcnic_npar_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+ ret = validate_npar_config(adapter, np_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+ nic_info.pci_func = pci_func;
+ nic_info.min_tx_bw = np_cfg[i].min_bw;
+ nic_info.max_tx_bw = np_cfg[i].max_bw;
+ ret = qlcnic_set_nic_info(adapter, &nic_info);
+ if (ret)
+ return ret;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+ adapter->npars[index].min_bw = nic_info.min_tx_bw;
+ adapter->npars[index].max_bw = nic_info.max_tx_bw;
+ }
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_npar_func_cfg *np_cfg;
+ struct qlcnic_info nic_info;
+ u8 pci_func;
+ int i, ret;
+ u32 count;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ memset(buf, 0, size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+ pci_func = adapter->npars[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+
+ np_cfg[pci_func].pci_func = pci_func;
+ np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+ np_cfg[pci_func].port_num = nic_info.phys_port;
+ np_cfg[pci_func].fw_capab = nic_info.capabilities;
+ np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+ np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+ np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
+ }
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= adapter->ahw->max_vnic_func)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&port_stats, 0, size);
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &port_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &port_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &port_stats, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics esw_stats;
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&esw_stats, 0, size);
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &esw_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &esw_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &esw_stats, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
+ if (offset >= adapter->ahw->max_vnic_func)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pci_func_cfg *pci_cfg;
+ struct qlcnic_pci_info *pci_info;
+ int i, ret;
+ u32 count;
+
+ pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret) {
+ kfree(pci_info);
+ return ret;
+ }
+
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_pci_func_cfg);
+ qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
+ for (i = 0; i < count; i++) {
+ pci_cfg[i].pci_func = pci_info[i].id;
+ pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].func_state = 0;
+ pci_cfg[i].port_num = pci_info[i].default_port;
+ pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+ pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+ }
+
+ kfree(pci_info);
+ return size;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ unsigned char *p_read_buf;
+ int ret, count;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!size)
+ return QL_STATUS_INVALID_PARAM;
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ count = size / sizeof(u32);
+
+ if (size % sizeof(u32))
+ count++;
+
+	/* Allocate a whole number of dwords so the rounded-up read fits. */
+	p_read_buf = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!p_read_buf)
+ return -ENOMEM;
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_read_buf);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+ count);
+
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ kfree(p_read_buf);
+ return ret;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+ qlcnic_swap32_buffer((u32 *)p_read_buf, count);
+ memcpy(buf, p_read_buf, size);
+ kfree(p_read_buf);
+
+ return size;
+}
+
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ count = size / sizeof(u32);
+ qlcnic_swap32_buffer((u32 *)buf, count);
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
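+	/* Write in QLC_83XX_FLASH_WRITE_MAX-dword chunks; any remainder
+	 * short of a full chunk is not written.
+	 */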
+ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+ (u32 *)p_src,
+ QLC_83XX_FLASH_WRITE_MAX);
+
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+		p_src = p_src + sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
+		offset = offset + sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset, size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32);
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int ret;
+ static int flash_mode;
+	unsigned long data = 0;	/* falls through to write modes if parse fails */
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
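+	/* The first write selects erase/bulk/word mode; later writes carry
+	 * raw data, for which the parse below fails and execution falls
+	 * through to the mode selected earlier (kept in a function-static,
+	 * so it is shared by all adapters).
+	 */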
+ ret = kstrtoul(buf, 16, &data);
+
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+ flash_mode = QLC_83XX_ERASE_MODE;
+ ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ break;
+
+ case QLC_83XX_FLASH_BULK_WRITE_CMD:
+ flash_mode = QLC_83XX_BULK_WRITE_MODE;
+ break;
+
+ case QLC_83XX_FLASH_WRITE_CMD:
+ flash_mode = QLC_83XX_WRITE_MODE;
+ break;
+ default:
+ if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+
+ if (flash_mode == QLC_83XX_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__,
+ __LINE__);
+ return -EIO;
+ }
+ }
+ }
+
+ return size;
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+static struct device_attribute dev_attr_beacon = {
+ .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_beacon,
+ .store = qlcnic_store_beacon,
+};
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static struct bin_attribute bin_attr_npar_config = {
+ .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_npar_config,
+ .write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+ .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pci_config,
+ .write = NULL,
+};
+
+static struct bin_attribute bin_attr_port_stats = {
+ .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+ .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+ .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_esw_config,
+ .write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+ .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pm_config,
+ .write = qlcnic_sysfs_write_pm_config,
+};
+
+static struct bin_attribute bin_attr_flash = {
+ .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+	/* display millidegrees Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_bin_file(dev, &bin_attr_port_stats))
+		dev_info(dev, "failed to create port stats sysfs entry\n");
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ return;
+
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+		dev_info(dev, "failed to create pci config sysfs entry\n");
+
+ if (device_create_file(dev, &dev_attr_beacon))
+		dev_info(dev, "failed to create beacon sysfs entry\n");
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+		dev_info(dev, "failed to create esw config sysfs entry\n");
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+	if (device_create_bin_file(dev, &bin_attr_npar_config))
+		dev_info(dev, "failed to create npar config sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_pm_config))
+		dev_info(dev, "failed to create pm config sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_esw_stats))
+		dev_info(dev, "failed to create eswitch stats sysfs entry\n");
+}
+
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_bin_file(dev, &bin_attr_port_stats);
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ return;
+
+ device_remove_bin_file(dev, &bin_attr_pci_config);
+ device_remove_file(dev, &dev_attr_beacon);
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ device_remove_bin_file(dev, &bin_attr_npar_config);
+ device_remove_bin_file(dev, &bin_attr_pm_config);
+ device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ qlcnic_create_diag_entries(adapter);
+
+ if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+ dev_info(dev, "failed to create flash sysfs entry\n");
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ qlcnic_remove_diag_entries(adapter);
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/Makefile b/kernel/drivers/net/ethernet/qlogic/qlge/Makefile
new file mode 100644
index 000000000..8a197658d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the QLogic 10GbE PCI Express Ethernet driver
+#
+
+obj-$(CONFIG_QLGE) += qlge.o
+
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge.h b/kernel/drivers/net/ethernet/qlogic/qlge/qlge.h
new file mode 100644
index 000000000..ef332708e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -0,0 +1,2338 @@
+/*
+ * QLogic QLA41xx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qlge for copyright and licensing details.
+ */
+#ifndef _QLGE_H_
+#define _QLGE_H_
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+/*
+ * General definitions...
+ */
+#define DRV_NAME "qlge"
+#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_VERSION "1.00.00.34"
+
+#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
+
+#define QLGE_VENDOR_ID 0x1077
+#define QLGE_DEVICE_ID_8012 0x8012
+#define QLGE_DEVICE_ID_8000 0x8000
+#define QLGE_MEZZ_SSYS_ID_068 0x0068
+#define QLGE_MEZZ_SSYS_ID_180 0x0180
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
+
+#define NUM_TX_RING_ENTRIES 256
+#define NUM_RX_RING_ENTRIES 256
+
+#define NUM_SMALL_BUFFERS 512
+#define NUM_LARGE_BUFFERS 512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+		((((x) * sizeof(u64)) / DB_PAGE_SIZE) + \
+		((((x) * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
+
+#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
+
+#define MAX_CQ 128
+#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
+#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 100
+
+#define TX_DESC_PER_IOCB 8
+
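+/* When an skb carries more fragments than fit in a single IOCB, the
+ * extra descriptors spill into an outbound address list (OAL).
+ */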
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
+#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
+#else /* all other page sizes */
+#define TX_DESC_PER_OAL 0
+#endif
+
+/* Word shifting for converting 64-bit
+ * address to a series of 16-bit words.
+ * This is used for some MPI firmware
+ * mailbox commands.
+ */
+#define LSW(x) ((u16)(x))
+#define MSW(x) ((u16)((u32)(x) >> 16))
+#define LSD(x) ((u32)((u64)(x)))
+#define MSD(x) ((u32)((((u64)(x)) >> 32)))
+
+/* MPI test register definitions. This register
+ * is used for determining alternate NIC function's
+ * PCI->func number.
+ */
+enum {
+ MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_FUNC_PRB_CTL = 0x100e,
+ MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+ MPI_TEST_FUNC_RST_STS = 0x100a,
+ MPI_TEST_FUNC_RST_FRC = 0x00000003,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+ MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
+ MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+ MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
+ MPI_TEST_NIC2_FUNC_SHIFT = 5,
+ MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+ MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
+ MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+ MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+ MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+ MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+ MPI_NIC_READ = 0x00000000,
+ MPI_NIC_REG_BLOCK = 0x00020000,
+ MPI_NIC_FUNCTION_SHIFT = 6,
+};
+
+/*
+ * Processor Address Register (PROC_ADDR) bit definitions.
+ */
+enum {
+
+ /* Misc. stuff */
+ MAILBOX_COUNT = 16,
+ MAILBOX_TIMEOUT = 5,
+
+ PROC_ADDR_RDY = (1 << 31),
+ PROC_ADDR_R = (1 << 30),
+ PROC_ADDR_ERR = (1 << 29),
+ PROC_ADDR_DA = (1 << 28),
+ PROC_ADDR_FUNC0_MBI = 0x00001180,
+ PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
+ PROC_ADDR_FUNC0_CTL = 0x000011a1,
+ PROC_ADDR_FUNC2_MBI = 0x00001280,
+ PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
+ PROC_ADDR_FUNC2_CTL = 0x000012a1,
+ PROC_ADDR_MPI_RISC = 0x00000000,
+ PROC_ADDR_MDE = 0x00010000,
+ PROC_ADDR_REGBLOCK = 0x00020000,
+ PROC_ADDR_RISC_REG = 0x00030000,
+};
+
+/*
+ * System Register (SYS) bit definitions.
+ */
+enum {
+ SYS_EFE = (1 << 0),
+ SYS_FAE = (1 << 1),
+ SYS_MDC = (1 << 2),
+ SYS_DST = (1 << 3),
+ SYS_DWC = (1 << 4),
+ SYS_EVW = (1 << 5),
+ SYS_OMP_DLY_MASK = 0x3f000000,
+ /*
+ * There are no values defined as of edit #15.
+ */
+ SYS_ODI = (1 << 14),
+};
+
+/*
+ * Reset/Failover Register (RST_FO) bit definitions.
+ */
+enum {
+ RST_FO_TFO = (1 << 0),
+ RST_FO_RR_MASK = 0x00060000,
+ RST_FO_RR_CQ_CAM = 0x00000000,
+ RST_FO_RR_DROP = 0x00000002,
+ RST_FO_RR_DQ = 0x00000004,
+ RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
+ RST_FO_FRB = (1 << 12),
+ RST_FO_MOP = (1 << 13),
+ RST_FO_REG = (1 << 14),
+ RST_FO_FR = (1 << 15),
+};
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+ FSC_DBRST_MASK = 0x00070000,
+ FSC_DBRST_256 = 0x00000000,
+ FSC_DBRST_512 = 0x00000001,
+ FSC_DBRST_768 = 0x00000002,
+ FSC_DBRST_1024 = 0x00000003,
+ FSC_DBL_MASK = 0x00180000,
+ FSC_DBL_DBRST = 0x00000000,
+ FSC_DBL_MAX_PLD = 0x00000008,
+ FSC_DBL_MAX_BRST = 0x00000010,
+ FSC_DBL_128_BYTES = 0x00000018,
+ FSC_EC = (1 << 5),
+ FSC_EPC_MASK = 0x00c00000,
+ FSC_EPC_INBOUND = (1 << 6),
+ FSC_EPC_OUTBOUND = (1 << 7),
+ FSC_VM_PAGESIZE_MASK = 0x07000000,
+ FSC_VM_PAGE_2K = 0x00000100,
+ FSC_VM_PAGE_4K = 0x00000200,
+ FSC_VM_PAGE_8K = 0x00000300,
+ FSC_VM_PAGE_64K = 0x00000600,
+ FSC_SH = (1 << 11),
+ FSC_DSB = (1 << 12),
+ FSC_STE = (1 << 13),
+ FSC_FE = (1 << 15),
+};
+
+/*
+ * Host Command Status Register (CSR) bit definitions.
+ */
+enum {
+ CSR_ERR_STS_MASK = 0x0000003f,
+ /*
+	 * There are no values defined as of edit #15.
+ */
+ CSR_RR = (1 << 8),
+ CSR_HRI = (1 << 9),
+ CSR_RP = (1 << 10),
+ CSR_CMD_PARM_SHIFT = 22,
+ CSR_CMD_NOP = 0x00000000,
+ CSR_CMD_SET_RST = 0x10000000,
+ CSR_CMD_CLR_RST = 0x20000000,
+ CSR_CMD_SET_PAUSE = 0x30000000,
+ CSR_CMD_CLR_PAUSE = 0x40000000,
+ CSR_CMD_SET_H2R_INT = 0x50000000,
+ CSR_CMD_CLR_H2R_INT = 0x60000000,
+ CSR_CMD_PAR_EN = 0x70000000,
+ CSR_CMD_SET_BAD_PAR = 0x80000000,
+ CSR_CMD_CLR_BAD_PAR = 0x90000000,
+ CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
+};
+
+/*
+ * Configuration Register (CFG) bit definitions.
+ */
+enum {
+ CFG_LRQ = (1 << 0),
+ CFG_DRQ = (1 << 1),
+ CFG_LR = (1 << 2),
+ CFG_DR = (1 << 3),
+ CFG_LE = (1 << 5),
+ CFG_LCQ = (1 << 6),
+ CFG_DCQ = (1 << 7),
+ CFG_Q_SHIFT = 8,
+ CFG_Q_MASK = 0x7f000000,
+};
+
+/*
+ * Status Register (STS) bit definitions.
+ */
+enum {
+ STS_FE = (1 << 0),
+ STS_PI = (1 << 1),
+ STS_PL0 = (1 << 2),
+ STS_PL1 = (1 << 3),
+ STS_PI0 = (1 << 4),
+ STS_PI1 = (1 << 5),
+ STS_FUNC_ID_MASK = 0x000000c0,
+ STS_FUNC_ID_SHIFT = 6,
+ STS_F0E = (1 << 8),
+ STS_F1E = (1 << 9),
+ STS_F2E = (1 << 10),
+ STS_F3E = (1 << 11),
+ STS_NFE = (1 << 12),
+};
+
+/*
+ * Interrupt Enable Register (INTR_EN) bit definitions.
+ */
+enum {
+ INTR_EN_INTR_MASK = 0x007f0000,
+ INTR_EN_TYPE_MASK = 0x03000000,
+ INTR_EN_TYPE_ENABLE = 0x00000100,
+ INTR_EN_TYPE_DISABLE = 0x00000200,
+ INTR_EN_TYPE_READ = 0x00000300,
+ INTR_EN_IHD = (1 << 13),
+ INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
+ INTR_EN_EI = (1 << 14),
+ INTR_EN_EN = (1 << 15),
+};
+
+/*
+ * Interrupt Mask Register (INTR_MASK) bit definitions.
+ */
+enum {
+ INTR_MASK_PI = (1 << 0),
+ INTR_MASK_HL0 = (1 << 1),
+ INTR_MASK_LH0 = (1 << 2),
+ INTR_MASK_HL1 = (1 << 3),
+ INTR_MASK_LH1 = (1 << 4),
+ INTR_MASK_SE = (1 << 5),
+ INTR_MASK_LSC = (1 << 6),
+ INTR_MASK_MC = (1 << 7),
+ INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
+};
+
+/*
+ * Register (REV_ID) bit definitions.
+ */
+enum {
+ REV_ID_MASK = 0x0000000f,
+ REV_ID_NICROLL_SHIFT = 0,
+ REV_ID_NICREV_SHIFT = 4,
+ REV_ID_XGROLL_SHIFT = 8,
+ REV_ID_XGREV_SHIFT = 12,
+ REV_ID_CHIPREV_SHIFT = 28,
+};
+
+/*
+ * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+ FRC_ECC_ERR_VW = (1 << 12),
+ FRC_ECC_ERR_VB = (1 << 13),
+ FRC_ECC_ERR_NI = (1 << 14),
+ FRC_ECC_ERR_NO = (1 << 15),
+ FRC_ECC_PFE_SHIFT = 16,
+ FRC_ECC_ERR_DO = (1 << 18),
+ FRC_ECC_P14 = (1 << 19),
+};
+
+/*
+ * Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+ ERR_STS_NOF = (1 << 0),
+ ERR_STS_NIF = (1 << 1),
+ ERR_STS_DRP = (1 << 2),
+ ERR_STS_XGP = (1 << 3),
+ ERR_STS_FOU = (1 << 4),
+ ERR_STS_FOC = (1 << 5),
+ ERR_STS_FOF = (1 << 6),
+ ERR_STS_FIU = (1 << 7),
+ ERR_STS_FIC = (1 << 8),
+ ERR_STS_FIF = (1 << 9),
+ ERR_STS_MOF = (1 << 10),
+ ERR_STS_TA = (1 << 11),
+ ERR_STS_MA = (1 << 12),
+ ERR_STS_MPE = (1 << 13),
+ ERR_STS_SCE = (1 << 14),
+ ERR_STS_STE = (1 << 15),
+ ERR_STS_FOW = (1 << 16),
+ ERR_STS_UE = (1 << 17),
+ ERR_STS_MCH = (1 << 26),
+ ERR_STS_LOC_SHIFT = 27,
+};
+
+/*
+ * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
+ */
+enum {
+ RAM_DBG_ADDR_FW = (1 << 30),
+ RAM_DBG_ADDR_FR = (1 << 31),
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+enum {
+ /*
+ * Example:
+ * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+ */
+ SEM_CLEAR = 0,
+ SEM_SET = 1,
+ SEM_FORCE = 3,
+ SEM_XGMAC0_SHIFT = 0,
+ SEM_XGMAC1_SHIFT = 2,
+ SEM_ICB_SHIFT = 4,
+ SEM_MAC_ADDR_SHIFT = 6,
+ SEM_FLASH_SHIFT = 8,
+ SEM_PROBE_SHIFT = 10,
+ SEM_RT_IDX_SHIFT = 12,
+ SEM_PROC_REG_SHIFT = 14,
+ SEM_XGMAC0_MASK = 0x00030000,
+ SEM_XGMAC1_MASK = 0x000c0000,
+ SEM_ICB_MASK = 0x00300000,
+ SEM_MAC_ADDR_MASK = 0x00c00000,
+ SEM_FLASH_MASK = 0x03000000,
+ SEM_PROBE_MASK = 0x0c000000,
+ SEM_RT_IDX_MASK = 0x30000000,
+ SEM_PROC_REG_MASK = 0xc0000000,
+};
+
+/*
+ * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
+ */
+enum {
+ XGMAC_ADDR_RDY = (1 << 31),
+ XGMAC_ADDR_R = (1 << 30),
+ XGMAC_ADDR_XME = (1 << 29),
+
+ /* XGMAC control registers */
+ PAUSE_SRC_LO = 0x00000100,
+ PAUSE_SRC_HI = 0x00000104,
+ GLOBAL_CFG = 0x00000108,
+ GLOBAL_CFG_RESET = (1 << 0),
+ GLOBAL_CFG_JUMBO = (1 << 6),
+ GLOBAL_CFG_TX_STAT_EN = (1 << 10),
+ GLOBAL_CFG_RX_STAT_EN = (1 << 11),
+ TX_CFG = 0x0000010c,
+ TX_CFG_RESET = (1 << 0),
+ TX_CFG_EN = (1 << 1),
+ TX_CFG_PREAM = (1 << 2),
+ RX_CFG = 0x00000110,
+ RX_CFG_RESET = (1 << 0),
+ RX_CFG_EN = (1 << 1),
+ RX_CFG_PREAM = (1 << 2),
+ FLOW_CTL = 0x0000011c,
+ PAUSE_OPCODE = 0x00000120,
+ PAUSE_TIMER = 0x00000124,
+ PAUSE_FRM_DEST_LO = 0x00000128,
+ PAUSE_FRM_DEST_HI = 0x0000012c,
+ MAC_TX_PARAMS = 0x00000134,
+ MAC_TX_PARAMS_JUMBO = (1 << 31),
+ MAC_TX_PARAMS_SIZE_SHIFT = 16,
+ MAC_RX_PARAMS = 0x00000138,
+ MAC_SYS_INT = 0x00000144,
+ MAC_SYS_INT_MASK = 0x00000148,
+ MAC_MGMT_INT = 0x0000014c,
+ MAC_MGMT_IN_MASK = 0x00000150,
+ EXT_ARB_MODE = 0x000001fc,
+
+ /* XGMAC TX statistics registers */
+ TX_PKTS = 0x00000200,
+ TX_BYTES = 0x00000208,
+ TX_MCAST_PKTS = 0x00000210,
+ TX_BCAST_PKTS = 0x00000218,
+ TX_UCAST_PKTS = 0x00000220,
+ TX_CTL_PKTS = 0x00000228,
+ TX_PAUSE_PKTS = 0x00000230,
+ TX_64_PKT = 0x00000238,
+ TX_65_TO_127_PKT = 0x00000240,
+ TX_128_TO_255_PKT = 0x00000248,
+ TX_256_511_PKT = 0x00000250,
+ TX_512_TO_1023_PKT = 0x00000258,
+ TX_1024_TO_1518_PKT = 0x00000260,
+ TX_1519_TO_MAX_PKT = 0x00000268,
+ TX_UNDERSIZE_PKT = 0x00000270,
+ TX_OVERSIZE_PKT = 0x00000278,
+
+ /* XGMAC statistics control registers */
+ RX_HALF_FULL_DET = 0x000002a0,
+ TX_HALF_FULL_DET = 0x000002a4,
+ RX_OVERFLOW_DET = 0x000002a8,
+ TX_OVERFLOW_DET = 0x000002ac,
+ RX_HALF_FULL_MASK = 0x000002b0,
+ TX_HALF_FULL_MASK = 0x000002b4,
+ RX_OVERFLOW_MASK = 0x000002b8,
+ TX_OVERFLOW_MASK = 0x000002bc,
+ STAT_CNT_CTL = 0x000002c0,
+ STAT_CNT_CTL_CLEAR_TX = (1 << 0),
+ STAT_CNT_CTL_CLEAR_RX = (1 << 1),
+ AUX_RX_HALF_FULL_DET = 0x000002d0,
+ AUX_TX_HALF_FULL_DET = 0x000002d4,
+ AUX_RX_OVERFLOW_DET = 0x000002d8,
+ AUX_TX_OVERFLOW_DET = 0x000002dc,
+ AUX_RX_HALF_FULL_MASK = 0x000002f0,
+ AUX_TX_HALF_FULL_MASK = 0x000002f4,
+ AUX_RX_OVERFLOW_MASK = 0x000002f8,
+ AUX_TX_OVERFLOW_MASK = 0x000002fc,
+
+ /* XGMAC RX statistics registers */
+ RX_BYTES = 0x00000300,
+ RX_BYTES_OK = 0x00000308,
+ RX_PKTS = 0x00000310,
+ RX_PKTS_OK = 0x00000318,
+ RX_BCAST_PKTS = 0x00000320,
+ RX_MCAST_PKTS = 0x00000328,
+ RX_UCAST_PKTS = 0x00000330,
+ RX_UNDERSIZE_PKTS = 0x00000338,
+ RX_OVERSIZE_PKTS = 0x00000340,
+ RX_JABBER_PKTS = 0x00000348,
+ RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
+ RX_DROP_EVENTS = 0x00000358,
+ RX_FCERR_PKTS = 0x00000360,
+ RX_ALIGN_ERR = 0x00000368,
+ RX_SYMBOL_ERR = 0x00000370,
+ RX_MAC_ERR = 0x00000378,
+ RX_CTL_PKTS = 0x00000380,
+ RX_PAUSE_PKTS = 0x00000388,
+ RX_64_PKTS = 0x00000390,
+ RX_65_TO_127_PKTS = 0x00000398,
+ RX_128_255_PKTS = 0x000003a0,
+ RX_256_511_PKTS = 0x000003a8,
+ RX_512_TO_1023_PKTS = 0x000003b0,
+ RX_1024_TO_1518_PKTS = 0x000003b8,
+ RX_1519_TO_MAX_PKTS = 0x000003c0,
+ RX_LEN_ERR_PKTS = 0x000003c8,
+
+ /* XGMAC MDIO control registers */
+ MDIO_TX_DATA = 0x00000400,
+ MDIO_RX_DATA = 0x00000410,
+ MDIO_CMD = 0x00000420,
+ MDIO_PHY_ADDR = 0x00000430,
+ MDIO_PORT = 0x00000440,
+ MDIO_STATUS = 0x00000450,
+
+ XGMAC_REGISTER_END = 0x00000740,
+};
+
+/*
+ * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
+ */
+enum {
+ ETS_QUEUE_SHIFT = 29,
+ ETS_REF = (1 << 26),
+ ETS_RS = (1 << 27),
+ ETS_P = (1 << 28),
+ ETS_FC_COS_SHIFT = 23,
+};
+
+/*
+ * Flash Address Register (FLASH_ADDR) bit definitions.
+ */
+enum {
+ FLASH_ADDR_RDY = (1 << 31),
+ FLASH_ADDR_R = (1 << 30),
+ FLASH_ADDR_ERR = (1 << 29),
+};
+
+/*
+ * Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+ CQ_STOP_QUEUE_MASK = (0x007f0000),
+ CQ_STOP_TYPE_MASK = (0x03000000),
+ CQ_STOP_TYPE_START = 0x00000100,
+ CQ_STOP_TYPE_STOP = 0x00000200,
+ CQ_STOP_TYPE_READ = 0x00000300,
+ CQ_STOP_EN = (1 << 15),
+};
+
+/*
+ * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+enum {
+ MAC_ADDR_IDX_SHIFT = 4,
+ MAC_ADDR_TYPE_SHIFT = 16,
+ MAC_ADDR_TYPE_COUNT = 10,
+ MAC_ADDR_TYPE_MASK = 0x000f0000,
+ MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
+ MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
+ MAC_ADDR_TYPE_VLAN = 0x00020000,
+ MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
+ MAC_ADDR_TYPE_FC_MAC = 0x00040000,
+ MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
+ MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
+ MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
+ MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
+ MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
+ MAC_ADDR_ADR = (1 << 25),
+ MAC_ADDR_RS = (1 << 26),
+ MAC_ADDR_E = (1 << 27),
+ MAC_ADDR_MR = (1 << 30),
+ MAC_ADDR_MW = (1 << 31),
+ MAX_MULTICAST_ENTRIES = 32,
+
+ /* Entry count and words per entry
+ * for each address type in the filter.
+ */
+ MAC_ADDR_MAX_CAM_ENTRIES = 512,
+ MAC_ADDR_MAX_CAM_WCOUNT = 3,
+ MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+ MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+ MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+ MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+ MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+ MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+ MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+ MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+ MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
+};
+
+/*
+ * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
+ */
+enum {
+ SPLT_HDR_EP = (1 << 31),
+};
+
+/*
+ * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
+ */
+enum {
+ FC_RCV_CFG_ECT = (1 << 15),
+ FC_RCV_CFG_DFH = (1 << 20),
+ FC_RCV_CFG_DVF = (1 << 21),
+ FC_RCV_CFG_RCE = (1 << 27),
+ FC_RCV_CFG_RFE = (1 << 28),
+ FC_RCV_CFG_TEE = (1 << 29),
+ FC_RCV_CFG_TCE = (1 << 30),
+ FC_RCV_CFG_TFE = (1 << 31),
+};
+
+/*
+ * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+ NIC_RCV_CFG_PPE = (1 << 0),
+ NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+ NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+ NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+ NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+ NIC_RCV_CFG_RV = (1 << 3),
+ NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+ NIC_RCV_CFG_DFQ_SHIFT = 8,
+ NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
+};
+
+/*
+ * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
+ */
+enum {
+ MGMT_RCV_CFG_ARP = (1 << 0),
+ MGMT_RCV_CFG_DHC = (1 << 1),
+ MGMT_RCV_CFG_DHS = (1 << 2),
+ MGMT_RCV_CFG_NP = (1 << 3),
+ MGMT_RCV_CFG_I6N = (1 << 4),
+ MGMT_RCV_CFG_I6R = (1 << 5),
+ MGMT_RCV_CFG_DH6 = (1 << 6),
+ MGMT_RCV_CFG_UD1 = (1 << 7),
+ MGMT_RCV_CFG_UD0 = (1 << 8),
+ MGMT_RCV_CFG_BCT = (1 << 9),
+ MGMT_RCV_CFG_MCT = (1 << 10),
+ MGMT_RCV_CFG_DM = (1 << 11),
+ MGMT_RCV_CFG_RM = (1 << 12),
+ MGMT_RCV_CFG_STL = (1 << 13),
+ MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
+ MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
+ MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
+ MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
+ MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
+};
+
+/*
+ * Routing Index Register (RT_IDX) bit definitions.
+ */
+enum {
+ RT_IDX_IDX_SHIFT = 8,
+ RT_IDX_TYPE_MASK = 0x000f0000,
+ RT_IDX_TYPE_SHIFT = 16,
+ RT_IDX_TYPE_RT = 0x00000000,
+ RT_IDX_TYPE_RT_INV = 0x00010000,
+ RT_IDX_TYPE_NICQ = 0x00020000,
+ RT_IDX_TYPE_NICQ_INV = 0x00030000,
+ RT_IDX_DST_MASK = 0x00700000,
+ RT_IDX_DST_RSS = 0x00000000,
+ RT_IDX_DST_CAM_Q = 0x00100000,
+ RT_IDX_DST_COS_Q = 0x00200000,
+ RT_IDX_DST_DFLT_Q = 0x00300000,
+ RT_IDX_DST_DEST_Q = 0x00400000,
+ RT_IDX_RS = (1 << 26),
+ RT_IDX_E = (1 << 27),
+ RT_IDX_MR = (1 << 30),
+ RT_IDX_MW = (1 << 31),
+
+ /* Nic Queue format - type 2 bits */
+ RT_IDX_BCAST = (1 << 0),
+ RT_IDX_MCAST = (1 << 1),
+ RT_IDX_MCAST_MATCH = (1 << 2),
+ RT_IDX_MCAST_REG_MATCH = (1 << 3),
+ RT_IDX_MCAST_HASH_MATCH = (1 << 4),
+ RT_IDX_FC_MACH = (1 << 5),
+ RT_IDX_ETH_FCOE = (1 << 6),
+ RT_IDX_CAM_HIT = (1 << 7),
+ RT_IDX_CAM_BIT0 = (1 << 8),
+ RT_IDX_CAM_BIT1 = (1 << 9),
+ RT_IDX_VLAN_TAG = (1 << 10),
+ RT_IDX_VLAN_MATCH = (1 << 11),
+ RT_IDX_VLAN_FILTER = (1 << 12),
+ RT_IDX_ETH_SKIP1 = (1 << 13),
+ RT_IDX_ETH_SKIP2 = (1 << 14),
+ RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
+ RT_IDX_802_3 = (1 << 16),
+ RT_IDX_LLDP = (1 << 17),
+ RT_IDX_UNUSED018 = (1 << 18),
+ RT_IDX_UNUSED019 = (1 << 19),
+ RT_IDX_UNUSED20 = (1 << 20),
+ RT_IDX_UNUSED21 = (1 << 21),
+ RT_IDX_ERR = (1 << 22),
+ RT_IDX_VALID = (1 << 23),
+ RT_IDX_TU_CSUM_ERR = (1 << 24),
+ RT_IDX_IP_CSUM_ERR = (1 << 25),
+ RT_IDX_MAC_ERR = (1 << 26),
+ RT_IDX_RSS_TCP6 = (1 << 27),
+ RT_IDX_RSS_TCP4 = (1 << 28),
+ RT_IDX_RSS_IPV6 = (1 << 29),
+ RT_IDX_RSS_IPV4 = (1 << 30),
+ RT_IDX_RSS_MATCH = (1 << 31),
+
+ /* Hierarchy for the NIC Queue Mask */
+ RT_IDX_ALL_ERR_SLOT = 0,
+ RT_IDX_MAC_ERR_SLOT = 0,
+ RT_IDX_IP_CSUM_ERR_SLOT = 1,
+ RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+ RT_IDX_BCAST_SLOT = 3,
+ RT_IDX_MCAST_MATCH_SLOT = 4,
+ RT_IDX_ALLMULTI_SLOT = 5,
+ RT_IDX_UNUSED6_SLOT = 6,
+ RT_IDX_UNUSED7_SLOT = 7,
+ RT_IDX_RSS_MATCH_SLOT = 8,
+ RT_IDX_RSS_IPV4_SLOT = 8,
+ RT_IDX_RSS_IPV6_SLOT = 9,
+ RT_IDX_RSS_TCP4_SLOT = 10,
+ RT_IDX_RSS_TCP6_SLOT = 11,
+ RT_IDX_CAM_HIT_SLOT = 12,
+ RT_IDX_UNUSED013 = 13,
+ RT_IDX_UNUSED014 = 14,
+ RT_IDX_PROMISCUOUS_SLOT = 15,
+ RT_IDX_MAX_RT_SLOTS = 8,
+ RT_IDX_MAX_NIC_SLOTS = 16,
+};
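+
+/* Illustrative sketch (not from the original header): building the value
+ * written to RT_IDX to enable the CAM-hit routing slot, mirroring how the
+ * type, destination, index, and enable fields above combine. The actual
+ * routing setup lives in the driver's .c files.
+ */
+static inline u32 ql_sketch_rt_idx_cam_hit(void)
+{
+ return RT_IDX_TYPE_NICQ | RT_IDX_DST_CAM_Q | RT_IDX_E |
+ (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);
+}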
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+ XG_SERDES_ADDR_RDY = (1 << 31),
+ XG_SERDES_ADDR_R = (1 << 30),
+
+ XG_SERDES_ADDR_STS = 0x00001E06,
+ XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+ XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+ XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+ /* Serdes coredump definitions. */
+ XG_SERDES_XAUI_AN_START = 0x00000000,
+ XG_SERDES_XAUI_AN_END = 0x00000034,
+ XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+ XG_SERDES_XAUI_HSS_PCS_END = 0x00000880,
+ XG_SERDES_XFI_AN_START = 0x00001000,
+ XG_SERDES_XFI_AN_END = 0x00001034,
+ XG_SERDES_XFI_TRAIN_START = 0x10001050,
+ XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+ XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+ XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+ XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+ XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+ XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+ XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+ XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+ XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+ PRB_MX_ADDR_ARE = (1 << 16),
+ PRB_MX_ADDR_UP = (1 << 15),
+ PRB_MX_ADDR_SWP = (1 << 14),
+
+ /* Module select values. */
+ PRB_MX_ADDR_MAX_MODS = 21,
+ PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+ PRB_MX_ADDR_MOD_SEL_TBD = 0,
+ PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+ PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+ PRB_MX_ADDR_MOD_SEL_FRB = 3,
+ PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+ PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+ PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+ PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+ PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+ PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+ PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+ PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+ PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+ PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+ PRB_MX_ADDR_MOD_SEL_REG = 14,
+ PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+ PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+ PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+ PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+ PRB_MX_ADDR_MOD_SEL_MOP = 20,
+ /* Bit fields indicating which modules
+ * are valid for each clock domain.
+ */
+ PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+ PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+ PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+ PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+ PRB_MX_ADDR_VALID_TOTAL = 34,
+
+ /* Clock domain values. */
+ PRB_MX_ADDR_CLOCK_SHIFT = 6,
+ PRB_MX_ADDR_SYS_CLOCK = 0,
+ PRB_MX_ADDR_PCI_CLOCK = 2,
+ PRB_MX_ADDR_FC_CLOCK = 5,
+ PRB_MX_ADDR_XGM_CLOCK = 6,
+
+ PRB_MX_ADDR_MAX_MUX = 64,
+};
+
+/*
+ * Control Register Set Map
+ */
+enum {
+ PROC_ADDR = 0, /* Use semaphore */
+ PROC_DATA = 0x04, /* Use semaphore */
+ SYS = 0x08,
+ RST_FO = 0x0c,
+ FSC = 0x10,
+ CSR = 0x14,
+ LED = 0x18,
+ ICB_RID = 0x1c, /* Use semaphore */
+ ICB_L = 0x20, /* Use semaphore */
+ ICB_H = 0x24, /* Use semaphore */
+ CFG = 0x28,
+ BIOS_ADDR = 0x2c,
+ STS = 0x30,
+ INTR_EN = 0x34,
+ INTR_MASK = 0x38,
+ ISR1 = 0x3c,
+ ISR2 = 0x40,
+ ISR3 = 0x44,
+ ISR4 = 0x48,
+ REV_ID = 0x4c,
+ FRC_ECC_ERR = 0x50,
+ ERR_STS = 0x54,
+ RAM_DBG_ADDR = 0x58,
+ RAM_DBG_DATA = 0x5c,
+ ECC_ERR_CNT = 0x60,
+ SEM = 0x64,
+ GPIO_1 = 0x68, /* Use semaphore */
+ GPIO_2 = 0x6c, /* Use semaphore */
+ GPIO_3 = 0x70, /* Use semaphore */
+ RSVD2 = 0x74,
+ XGMAC_ADDR = 0x78, /* Use semaphore */
+ XGMAC_DATA = 0x7c, /* Use semaphore */
+ NIC_ETS = 0x80,
+ CNA_ETS = 0x84,
+ FLASH_ADDR = 0x88, /* Use semaphore */
+ FLASH_DATA = 0x8c, /* Use semaphore */
+ CQ_STOP = 0x90,
+ PAGE_TBL_RID = 0x94,
+ WQ_PAGE_TBL_LO = 0x98,
+ WQ_PAGE_TBL_HI = 0x9c,
+ CQ_PAGE_TBL_LO = 0xa0,
+ CQ_PAGE_TBL_HI = 0xa4,
+ MAC_ADDR_IDX = 0xa8, /* Use semaphore */
+ MAC_ADDR_DATA = 0xac, /* Use semaphore */
+ COS_DFLT_CQ1 = 0xb0,
+ COS_DFLT_CQ2 = 0xb4,
+ ETYPE_SKIP1 = 0xb8,
+ ETYPE_SKIP2 = 0xbc,
+ SPLT_HDR = 0xc0,
+ FC_PAUSE_THRES = 0xc4,
+ NIC_PAUSE_THRES = 0xc8,
+ FC_ETHERTYPE = 0xcc,
+ FC_RCV_CFG = 0xd0,
+ NIC_RCV_CFG = 0xd4,
+ FC_COS_TAGS = 0xd8,
+ NIC_COS_TAGS = 0xdc,
+ MGMT_RCV_CFG = 0xe0,
+ RT_IDX = 0xe4,
+ RT_DATA = 0xe8,
+ RSVD7 = 0xec,
+ XG_SERDES_ADDR = 0xf0,
+ XG_SERDES_DATA = 0xf4,
+ PRB_MX_ADDR = 0xf8, /* Use semaphore */
+ PRB_MX_DATA = 0xfc, /* Use semaphore */
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
+/*
+ * CAM output format.
+ */
+enum {
+ CAM_OUT_ROUTE_FC = 0,
+ CAM_OUT_ROUTE_NIC = 1,
+ CAM_OUT_FUNC_SHIFT = 2,
+ CAM_OUT_RV = (1 << 4),
+ CAM_OUT_SH = (1 << 15),
+ CAM_OUT_CQ_ID_SHIFT = 5,
+};
+
+/*
+ * Mailbox definitions
+ */
+enum {
+ /* Asynchronous Event Notifications */
+ AEN_SYS_ERR = 0x00008002,
+ AEN_LINK_UP = 0x00008011,
+ AEN_LINK_DOWN = 0x00008012,
+ AEN_IDC_CMPLT = 0x00008100,
+ AEN_IDC_REQ = 0x00008101,
+ AEN_IDC_EXT = 0x00008102,
+ AEN_DCBX_CHG = 0x00008110,
+ AEN_AEN_LOST = 0x00008120,
+ AEN_AEN_SFP_IN = 0x00008130,
+ AEN_AEN_SFP_OUT = 0x00008131,
+ AEN_FW_INIT_DONE = 0x00008400,
+ AEN_FW_INIT_FAIL = 0x00008401,
+
+ /* Mailbox Command Opcodes. */
+ MB_CMD_NOP = 0x00000000,
+ MB_CMD_EX_FW = 0x00000002,
+ MB_CMD_MB_TEST = 0x00000006,
+ MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
+ MB_CMD_ABOUT_FW = 0x00000008,
+ MB_CMD_COPY_RISC_RAM = 0x0000000a,
+ MB_CMD_LOAD_RISC_RAM = 0x0000000b,
+ MB_CMD_DUMP_RISC_RAM = 0x0000000c,
+ MB_CMD_WRITE_RAM = 0x0000000d,
+ MB_CMD_INIT_RISC_RAM = 0x0000000e,
+ MB_CMD_READ_RAM = 0x0000000f,
+ MB_CMD_STOP_FW = 0x00000014,
+ MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+ MB_CMD_WRITE_SFP = 0x00000030,
+ MB_CMD_READ_SFP = 0x00000031,
+ MB_CMD_INIT_FW = 0x00000060,
+ MB_CMD_GET_IFCB = 0x00000061,
+ MB_CMD_GET_FW_STATE = 0x00000069,
+ MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
+ MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
+ MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
+ MB_WOL_DISABLE = 0,
+ MB_WOL_MAGIC_PKT = (1 << 1),
+ MB_WOL_FLTR = (1 << 2),
+ MB_WOL_UCAST = (1 << 3),
+ MB_WOL_MCAST = (1 << 4),
+ MB_WOL_BCAST = (1 << 5),
+ MB_WOL_LINK_UP = (1 << 6),
+ MB_WOL_LINK_DOWN = (1 << 7),
+ MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
+ MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
+ MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
+ MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
+ MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
+ MB_CMD_SET_WOL_IMMED = 0x00000115,
+ MB_CMD_PORT_RESET = 0x00000120,
+ MB_CMD_SET_PORT_CFG = 0x00000122,
+ MB_CMD_GET_PORT_CFG = 0x00000123,
+ MB_CMD_GET_LINK_STS = 0x00000124,
+ MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+ QL_LED_BLINK = 0x03e803e8,
+ MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
+ MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
+ MB_SET_MPI_TFK_STOP = (1 << 0),
+ MB_SET_MPI_TFK_RESUME = (1 << 1),
+ MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
+ MB_GET_MPI_TFK_STOPPED = (1 << 0),
+ MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
+ /* Sub-commands for IDC request.
+ * This describes the reason for the
+ * IDC request.
+ */
+ MB_CMD_IOP_NONE = 0x0000,
+ MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
+ MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
+ MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
+ MB_CMD_IOP_DVR_START = 0x0100,
+ MB_CMD_IOP_FLASH_ACC = 0x0101,
+ MB_CMD_IOP_RESTART_MPI = 0x0102,
+ MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
+
+ /* Mailbox Command Status. */
+ MB_CMD_STS_GOOD = 0x00004000, /* Success. */
+ MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
+ MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
+ MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
+ MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
+ MB_CMD_STS_ERR = 0x00004005, /* System Error. */
+ MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
+};
+
+struct mbox_params {
+ u32 mbox_in[MAILBOX_COUNT];
+ u32 mbox_out[MAILBOX_COUNT];
+ int in_count;
+ int out_count;
+};
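+
+/* Illustrative sketch (hypothetical helper, not the driver's code):
+ * framing a simple mailbox command. MB_CMD_ABOUT_FW sends one inbound
+ * word and expects three outbound words; the dispatch logic itself lives
+ * in the MPI code, not in this header.
+ */
+static inline void ql_sketch_frame_about_fw(struct mbox_params *mbcp)
+{
+ memset(mbcp, 0, sizeof(*mbcp));
+ mbcp->in_count = 1; /* one command word */
+ mbcp->out_count = 3; /* status plus firmware revision words */
+ mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+}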
+
+struct flash_params_8012 {
+ u8 dev_id_str[4];
+ __le16 size;
+ __le16 csum;
+ __le16 ver;
+ __le16 sub_dev_id;
+ u8 mac_addr[6];
+ __le16 res;
+};
+
+/* The 8000 device's flash uses a different structure
+ * at a different offset in flash.
+ */
+#define FUNC0_FLASH_OFFSET 0x140200
+#define FUNC1_FLASH_OFFSET 0x140600
+
+/* Flash related data structures. */
+struct flash_params_8000 {
+ u8 dev_id_str[4]; /* "8000" */
+ __le16 ver;
+ __le16 size;
+ __le16 csum;
+ __le16 reserved0;
+ __le16 total_size;
+ __le16 entry_count;
+ u8 data_type0;
+ u8 data_size0;
+ u8 mac_addr[6];
+ u8 data_type1;
+ u8 data_size1;
+ u8 mac_addr1[6];
+ u8 data_type2;
+ u8 data_size2;
+ __le16 vlan_id;
+ u8 data_type3;
+ u8 data_size3;
+ __le16 last;
+ u8 reserved1[464];
+ __le16 subsys_ven_id;
+ __le16 subsys_dev_id;
+ u8 reserved2[4];
+};
+
+union flash_params {
+ struct flash_params_8012 flash_params_8012;
+ struct flash_params_8000 flash_params_8000;
+};
+
+/*
+ * doorbell space for the rx ring context
+ */
+struct rx_doorbell_context {
+ u32 cnsmr_idx; /* 0x00 */
+ u32 valid; /* 0x04 */
+ u32 reserved[4]; /* 0x08-0x14 */
+ u32 lbq_prod_idx; /* 0x18 */
+ u32 sbq_prod_idx; /* 0x1c */
+};
+
+/*
+ * doorbell space for the tx ring context
+ */
+struct tx_doorbell_context {
+ u32 prod_idx; /* 0x00 */
+ u32 valid; /* 0x04 */
+ u32 reserved[4]; /* 0x08-0x14 */
+ u32 lbq_prod_idx; /* 0x18 */
+ u32 sbq_prod_idx; /* 0x1c */
+};
+
+/* DATA STRUCTURES SHARED WITH HARDWARE. */
+struct tx_buf_desc {
+ __le64 addr;
+ __le32 len;
+#define TX_DESC_LEN_MASK 0x000fffff
+#define TX_DESC_C 0x40000000
+#define TX_DESC_E 0x80000000
+} __packed;
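+
+/* Illustrative sketch (hypothetical helper): filling the final buffer
+ * descriptor of a TX IOCB. The E bit terminates the descriptor list;
+ * the C bit would instead chain to a continuation (OAL) entry.
+ */
+static inline void ql_sketch_fill_last_tbd(struct tx_buf_desc *tbd,
+ dma_addr_t addr, u32 len)
+{
+ tbd->addr = cpu_to_le64(addr);
+ tbd->len = cpu_to_le32((len & TX_DESC_LEN_MASK) | TX_DESC_E);
+}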
+
+/*
+ * IOCB Definitions...
+ */
+
+#define OPCODE_OB_MAC_IOCB 0x01
+#define OPCODE_OB_MAC_TSO_IOCB 0x02
+#define OPCODE_IB_MAC_IOCB 0x20
+#define OPCODE_IB_MPI_IOCB 0x21
+#define OPCODE_IB_AE_IOCB 0x3f
+
+struct ob_mac_iocb_req {
+ u8 opcode;
+ u8 flags1;
+#define OB_MAC_IOCB_REQ_OI 0x01
+#define OB_MAC_IOCB_REQ_I 0x02
+#define OB_MAC_IOCB_REQ_D 0x08
+#define OB_MAC_IOCB_REQ_F 0x10
+ u8 flags2;
+ u8 flags3;
+#define OB_MAC_IOCB_DFP 0x02
+#define OB_MAC_IOCB_V 0x04
+ __le32 reserved1[2];
+ __le16 frame_len;
+#define OB_MAC_IOCB_LEN_MASK 0x3ffff
+ __le16 reserved2;
+ u32 tid;
+ u32 txq_idx;
+ __le32 reserved3;
+ __le16 vlan_tci;
+ __le16 reserved4;
+ struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __packed;
+
+struct ob_mac_iocb_rsp {
+ u8 opcode; /* */
+ u8 flags1; /* */
+#define OB_MAC_IOCB_RSP_OI 0x01 /* */
+#define OB_MAC_IOCB_RSP_I 0x02 /* */
+#define OB_MAC_IOCB_RSP_E 0x08 /* */
+#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
+#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
+#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
+ u8 flags2; /* */
+ u8 flags3; /* */
+#define OB_MAC_IOCB_RSP_B 0x80 /* */
+ u32 tid;
+ u32 txq_idx;
+ __le32 reserved[13];
+} __packed;
+
+struct ob_mac_tso_iocb_req {
+ u8 opcode;
+ u8 flags1;
+#define OB_MAC_TSO_IOCB_OI 0x01
+#define OB_MAC_TSO_IOCB_I 0x02
+#define OB_MAC_TSO_IOCB_D 0x08
+#define OB_MAC_TSO_IOCB_IP4 0x40
+#define OB_MAC_TSO_IOCB_IP6 0x80
+ u8 flags2;
+#define OB_MAC_TSO_IOCB_LSO 0x20
+#define OB_MAC_TSO_IOCB_UC 0x40
+#define OB_MAC_TSO_IOCB_TC 0x80
+ u8 flags3;
+#define OB_MAC_TSO_IOCB_IC 0x01
+#define OB_MAC_TSO_IOCB_DFP 0x02
+#define OB_MAC_TSO_IOCB_V 0x04
+ __le32 reserved1[2];
+ __le32 frame_len;
+ u32 tid;
+ u32 txq_idx;
+ __le16 total_hdrs_len;
+ __le16 net_trans_offset;
+#define OB_MAC_TRANSPORT_HDR_SHIFT 6
+ __le16 vlan_tci;
+ __le16 mss;
+ struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __packed;
+
+struct ob_mac_tso_iocb_rsp {
+ u8 opcode;
+ u8 flags1;
+#define OB_MAC_TSO_IOCB_RSP_OI 0x01
+#define OB_MAC_TSO_IOCB_RSP_I 0x02
+#define OB_MAC_TSO_IOCB_RSP_E 0x08
+#define OB_MAC_TSO_IOCB_RSP_S 0x10
+#define OB_MAC_TSO_IOCB_RSP_L 0x20
+#define OB_MAC_TSO_IOCB_RSP_P 0x40
+ u8 flags2; /* */
+ u8 flags3; /* */
+#define OB_MAC_TSO_IOCB_RSP_B 0x8000
+ u32 tid;
+ u32 txq_idx;
+ __le32 reserved2[13];
+} __packed;
+
+struct ib_mac_iocb_rsp {
+ u8 opcode; /* 0x20 */
+ u8 flags1;
+#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I 0x02 /* Disable intr generation */
+#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
+#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
+#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
+ u8 flags2;
+#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
+#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
+#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
+#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
+ u8 flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
+#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
+#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
+ __le32 data_len; /* */
+ __le64 data_addr; /* */
+ __le32 rss; /* */
+ __le16 vlan_id; /* 12 bits */
+#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
+#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
+
+ __le16 reserved1;
+ __le32 reserved2[6];
+ u8 reserved3[3];
+ u8 flags4;
+#define IB_MAC_IOCB_RSP_HV 0x20
+#define IB_MAC_IOCB_RSP_HS 0x40
+#define IB_MAC_IOCB_RSP_HL 0x80
+ __le32 hdr_len; /* */
+ __le64 hdr_addr; /* */
+} __packed;
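+
+/* Illustrative sketch (hypothetical helper): how the checksum flags above
+ * are typically tested when completing an RX frame. NU means no checksum
+ * was computed; TE and IE flag transport and IP checksum errors.
+ */
+static inline bool ql_sketch_rx_csum_ok(const struct ib_mac_iocb_rsp *rsp)
+{
+ return !(rsp->flags1 & IB_MAC_CSUM_ERR_MASK);
+}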
+
+struct ib_ae_iocb_rsp {
+ u8 opcode;
+ u8 flags1;
+#define IB_AE_IOCB_RSP_OI 0x01
+#define IB_AE_IOCB_RSP_I 0x02
+ u8 event;
+#define LINK_UP_EVENT 0x00
+#define LINK_DOWN_EVENT 0x01
+#define CAM_LOOKUP_ERR_EVENT 0x06
+#define SOFT_ECC_ERROR_EVENT 0x07
+#define MGMT_ERR_EVENT 0x08
+#define TEN_GIG_MAC_EVENT 0x09
+#define GPI0_H2L_EVENT 0x10
+#define GPI0_L2H_EVENT 0x20
+#define GPI1_H2L_EVENT 0x11
+#define GPI1_L2H_EVENT 0x21
+#define PCI_ERR_ANON_BUF_RD 0x40
+ u8 q_id;
+ __le32 reserved[15];
+} __packed;
+
+/*
+ * These three structures are for generic
+ * handling of ib and ob iocbs.
+ */
+struct ql_net_rsp_iocb {
+ u8 opcode;
+ u8 flags0;
+ __le16 length;
+ __le32 tid;
+ __le32 reserved[14];
+} __packed;
+
+struct net_req_iocb {
+ u8 opcode;
+ u8 flags0;
+ __le16 flags1;
+ __le32 tid;
+ __le32 reserved1[30];
+} __packed;
+
+/*
+ * tx ring initialization control block for chip.
+ * It is defined as:
+ * "Work Queue Initialization Control Block"
+ */
+struct wqicb {
+ __le16 len;
+#define Q_LEN_V (1 << 4)
+#define Q_LEN_CPP_CONT 0x0000
+#define Q_LEN_CPP_16 0x0001
+#define Q_LEN_CPP_32 0x0002
+#define Q_LEN_CPP_64 0x0003
+#define Q_LEN_CPP_512 0x0006
+ __le16 flags;
+#define Q_PRI_SHIFT 1
+#define Q_FLAGS_LC 0x1000
+#define Q_FLAGS_LB 0x2000
+#define Q_FLAGS_LI 0x4000
+#define Q_FLAGS_LO 0x8000
+ __le16 cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+ __le16 rid;
+ __le64 addr;
+ __le64 cnsmr_idx_addr;
+} __packed;
+
+/*
+ * rx ring initialization control block for chip.
+ * It is defined as:
+ * "Completion Queue Initialization Control Block"
+ */
+struct cqicb {
+ u8 msix_vect;
+ u8 reserved1;
+ u8 reserved2;
+ u8 flags;
+#define FLAGS_LV 0x08
+#define FLAGS_LS 0x10
+#define FLAGS_LL 0x20
+#define FLAGS_LI 0x40
+#define FLAGS_LC 0x80
+ __le16 len;
+#define LEN_V (1 << 4)
+#define LEN_CPP_CONT 0x0000
+#define LEN_CPP_32 0x0001
+#define LEN_CPP_64 0x0002
+#define LEN_CPP_128 0x0003
+ __le16 rid;
+ __le64 addr;
+ __le64 prod_idx_addr;
+ __le16 pkt_delay;
+ __le16 irq_delay;
+ __le64 lbq_addr;
+ __le16 lbq_buf_size;
+ __le16 lbq_len; /* entry count */
+ __le64 sbq_addr;
+ __le16 sbq_buf_size;
+ __le16 sbq_len; /* entry count */
+} __packed;
+
+struct ricb {
+ u8 base_cq;
+#define RSS_L4K 0x80
+ u8 flags;
+#define RSS_L6K 0x01
+#define RSS_LI 0x02
+#define RSS_LB 0x04
+#define RSS_LM 0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+ __le16 mask;
+ u8 hash_cq_id[1024];
+ __le32 ipv6_hash_key[10];
+ __le32 ipv4_hash_key[4];
+} __packed;
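+
+/* Illustrative sketch (hypothetical helper; flag choices are an
+ * assumption): programming the RICB to hash TCP over IPv4/IPv6 and to
+ * spread the 1024 hash buckets across the RSS rings. A real setup would
+ * also fill the IPv4/IPv6 hash keys with random data.
+ */
+static inline void ql_sketch_init_ricb(struct ricb *ricb, u16 rss_ring_count)
+{
+ int i;
+
+ memset(ricb, 0, sizeof(*ricb));
+ ricb->base_cq = RSS_L4K;
+ ricb->flags = RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6;
+ ricb->mask = cpu_to_le16(0x3ff); /* 1024-entry hash table */
+ for (i = 0; i < 1024; i++)
+ ricb->hash_cq_id[i] = i % rss_ring_count;
+}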
+
+/* SOFTWARE/DRIVER DATA STRUCTURES. */
+
+struct oal {
+ struct tx_buf_desc oal[TX_DESC_PER_OAL];
+};
+
+struct map_list {
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct tx_ring_desc {
+ struct sk_buff *skb;
+ struct ob_mac_iocb_req *queue_entry;
+ u32 index;
+ struct oal oal;
+ struct map_list map[MAX_SKB_FRAGS + 2];
+ int map_cnt;
+ struct tx_ring_desc *next;
+};
+
+struct page_chunk {
+ struct page *page; /* master page */
+ char *va; /* virt addr for this chunk */
+ u64 map; /* mapping for master */
+ unsigned int offset; /* offset for this chunk */
+ unsigned int last_flag; /* flag set for last chunk in page */
+};
+
+struct bq_desc {
+ union {
+ struct page_chunk pg_chunk;
+ struct sk_buff *skb;
+ } p;
+ __le64 *addr;
+ u32 index;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % (qdev->tx_ring_count))
+
+struct tx_ring {
+ /*
+ * queue info.
+ */
+ struct wqicb wqicb; /* structure used to inform chip of new queue */
+ void *wq_base; /* pci_alloc:virtual addr for tx */
+ dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
+ __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
+ dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
+ u32 wq_size; /* size in bytes of queue area */
+ u32 wq_len; /* number of entries in queue */
+ void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
+ void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
+ u16 prod_idx; /* current value for prod idx */
+ u16 cq_id; /* completion (rx) queue for tx completions */
+ u8 wq_id; /* queue id for this entry */
+ u8 reserved1[3];
+ struct tx_ring_desc *q; /* descriptor list for the queue */
+ spinlock_t lock;
+ atomic_t tx_count; /* counts down for every outstanding IO */
+ struct delayed_work tx_work;
+ struct ql_adapter *qdev;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+ DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
+ TX_Q = 3, /* Handles outbound completions. */
+ RX_Q = 4, /* Handles inbound completions. */
+};
+
+struct rx_ring {
+ struct cqicb cqicb; /* The chip's completion queue init control block. */
+
+ /* Completion queue elements. */
+ void *cq_base;
+ dma_addr_t cq_base_dma;
+ u32 cq_size;
+ u32 cq_len;
+ u16 cq_id;
+ __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
+ dma_addr_t prod_idx_sh_reg_dma;
+ void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
+ u32 cnsmr_idx; /* current sw idx */
+ struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
+ void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
+
+ /* Large buffer queue elements. */
+ u32 lbq_len; /* entry count */
+ u32 lbq_size; /* size in bytes of queue */
+ u32 lbq_buf_size;
+ void *lbq_base;
+ dma_addr_t lbq_base_dma;
+ void *lbq_base_indirect;
+ dma_addr_t lbq_base_indirect_dma;
+ struct page_chunk pg_chunk; /* current page for chunks */
+ struct bq_desc *lbq; /* array of control blocks */
+ void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
+ u32 lbq_prod_idx; /* current sw prod idx */
+ u32 lbq_curr_idx; /* next entry we expect */
+ u32 lbq_clean_idx; /* beginning of new descs */
+ u32 lbq_free_cnt; /* free buffer desc cnt */
+
+ /* Small buffer queue elements. */
+ u32 sbq_len; /* entry count */
+ u32 sbq_size; /* size in bytes of queue */
+ u32 sbq_buf_size;
+ void *sbq_base;
+ dma_addr_t sbq_base_dma;
+ void *sbq_base_indirect;
+ dma_addr_t sbq_base_indirect_dma;
+ struct bq_desc *sbq; /* array of control blocks */
+ void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
+ u32 sbq_prod_idx; /* current sw prod idx */
+ u32 sbq_curr_idx; /* next entry we expect */
+ u32 sbq_clean_idx; /* beginning of new descs */
+ u32 sbq_free_cnt; /* free buffer desc cnt */
+
+ /* Misc. handler elements. */
+ u32 type; /* Type of queue, tx, rx. */
+ u32 irq; /* Which vector this ring is assigned. */
+ u32 cpu; /* Which CPU this should run on. */
+ char name[IFNAMSIZ + 5];
+ struct napi_struct napi;
+ u8 reserved;
+ struct ql_adapter *qdev;
+ u64 rx_packets;
+ u64 rx_multicast;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
+};
+
+/*
+ * RSS Initialization Control Block
+ */
+struct hash_id {
+ u8 value[4];
+};
+
+struct nic_stats {
+ /*
+ * These stats come from offset 200h to 278h
+ * in the XGMAC register.
+ */
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_ucast_pkts;
+ u64 tx_ctl_pkts;
+ u64 tx_pause_pkts;
+ u64 tx_64_pkt;
+ u64 tx_65_to_127_pkt;
+ u64 tx_128_to_255_pkt;
+ u64 tx_256_511_pkt;
+ u64 tx_512_to_1023_pkt;
+ u64 tx_1024_to_1518_pkt;
+ u64 tx_1519_to_max_pkt;
+ u64 tx_undersize_pkt;
+ u64 tx_oversize_pkt;
+
+ /*
+ * These stats come from offset 300h to 3C8h
+ * in the XGMAC register.
+ */
+ u64 rx_bytes;
+ u64 rx_bytes_ok;
+ u64 rx_pkts;
+ u64 rx_pkts_ok;
+ u64 rx_bcast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_ucast_pkts;
+ u64 rx_undersize_pkts;
+ u64 rx_oversize_pkts;
+ u64 rx_jabber_pkts;
+ u64 rx_undersize_fcerr_pkts;
+ u64 rx_drop_events;
+ u64 rx_fcerr_pkts;
+ u64 rx_align_err;
+ u64 rx_symbol_err;
+ u64 rx_mac_err;
+ u64 rx_ctl_pkts;
+ u64 rx_pause_pkts;
+ u64 rx_64_pkts;
+ u64 rx_65_to_127_pkts;
+ u64 rx_128_255_pkts;
+ u64 rx_256_511_pkts;
+ u64 rx_512_to_1023_pkts;
+ u64 rx_1024_to_1518_pkts;
+ u64 rx_1519_to_max_pkts;
+ u64 rx_len_err_pkts;
+ /* Receive Mac Err stats */
+ u64 rx_code_err;
+ u64 rx_oversize_err;
+ u64 rx_undersize_err;
+ u64 rx_preamble_err;
+ u64 rx_frame_len_err;
+ u64 rx_crc_err;
+ u64 rx_err_count;
+ /*
+ * These stats come from offset 500h to 5C8h
+ * in the XGMAC register.
+ */
+ u64 tx_cbfc_pause_frames0;
+ u64 tx_cbfc_pause_frames1;
+ u64 tx_cbfc_pause_frames2;
+ u64 tx_cbfc_pause_frames3;
+ u64 tx_cbfc_pause_frames4;
+ u64 tx_cbfc_pause_frames5;
+ u64 tx_cbfc_pause_frames6;
+ u64 tx_cbfc_pause_frames7;
+ u64 rx_cbfc_pause_frames0;
+ u64 rx_cbfc_pause_frames1;
+ u64 rx_cbfc_pause_frames2;
+ u64 rx_cbfc_pause_frames3;
+ u64 rx_cbfc_pause_frames4;
+ u64 rx_cbfc_pause_frames5;
+ u64 rx_cbfc_pause_frames6;
+ u64 rx_cbfc_pause_frames7;
+ u64 rx_nic_fifo_drop;
+};
+
+/* Firmware coredump internal register address/length pairs. */
+enum {
+ MPI_CORE_REGS_ADDR = 0x00030000,
+ MPI_CORE_REGS_CNT = 127,
+ MPI_CORE_SH_REGS_CNT = 16,
+ TEST_REGS_ADDR = 0x00001000,
+ TEST_REGS_CNT = 23,
+ RMII_REGS_ADDR = 0x00001040,
+ RMII_REGS_CNT = 64,
+ FCMAC1_REGS_ADDR = 0x00001080,
+ FCMAC2_REGS_ADDR = 0x000010c0,
+ FCMAC_REGS_CNT = 64,
+ FC1_MBX_REGS_ADDR = 0x00001100,
+ FC2_MBX_REGS_ADDR = 0x00001240,
+ FC_MBX_REGS_CNT = 64,
+ IDE_REGS_ADDR = 0x00001140,
+ IDE_REGS_CNT = 64,
+ NIC1_MBX_REGS_ADDR = 0x00001180,
+ NIC2_MBX_REGS_ADDR = 0x00001280,
+ NIC_MBX_REGS_CNT = 64,
+ SMBUS_REGS_ADDR = 0x00001200,
+ SMBUS_REGS_CNT = 64,
+ I2C_REGS_ADDR = 0x00001fc0,
+ I2C_REGS_CNT = 64,
+ MEMC_REGS_ADDR = 0x00003000,
+ MEMC_REGS_CNT = 256,
+ PBUS_REGS_ADDR = 0x00007c00,
+ PBUS_REGS_CNT = 256,
+ MDE_REGS_ADDR = 0x00010000,
+ MDE_REGS_CNT = 6,
+ CODE_RAM_ADDR = 0x00020000,
+ CODE_RAM_CNT = 0x2000,
+ MEMC_RAM_ADDR = 0x00100000,
+ MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+ u32 cookie;
+ u8 idString[16];
+ u32 timeLo;
+ u32 timeHi;
+ u32 imageSize;
+ u32 headerSize;
+ u8 info[220];
+};
+
+struct mpi_coredump_segment_header {
+ u32 cookie;
+ u32 segNum;
+ u32 segSize;
+ u32 extra;
+ u8 description[16];
+};
+
+/* Firmware coredump header segment numbers. */
+enum {
+ CORE_SEG_NUM = 1,
+ TEST_LOGIC_SEG_NUM = 2,
+ RMII_SEG_NUM = 3,
+ FCMAC1_SEG_NUM = 4,
+ FCMAC2_SEG_NUM = 5,
+ FC1_MBOX_SEG_NUM = 6,
+ IDE_SEG_NUM = 7,
+ NIC1_MBOX_SEG_NUM = 8,
+ SMBUS_SEG_NUM = 9,
+ FC2_MBOX_SEG_NUM = 10,
+ NIC2_MBOX_SEG_NUM = 11,
+ I2C_SEG_NUM = 12,
+ MEMC_SEG_NUM = 13,
+ PBUS_SEG_NUM = 14,
+ MDE_SEG_NUM = 15,
+ NIC1_CONTROL_SEG_NUM = 16,
+ NIC2_CONTROL_SEG_NUM = 17,
+ NIC1_XGMAC_SEG_NUM = 18,
+ NIC2_XGMAC_SEG_NUM = 19,
+ WCS_RAM_SEG_NUM = 20,
+ MEMC_RAM_SEG_NUM = 21,
+ XAUI_AN_SEG_NUM = 22,
+ XAUI_HSS_PCS_SEG_NUM = 23,
+ XFI_AN_SEG_NUM = 24,
+ XFI_TRAIN_SEG_NUM = 25,
+ XFI_HSS_PCS_SEG_NUM = 26,
+ XFI_HSS_TX_SEG_NUM = 27,
+ XFI_HSS_RX_SEG_NUM = 28,
+ XFI_HSS_PLL_SEG_NUM = 29,
+ MISC_NIC_INFO_SEG_NUM = 30,
+ INTR_STATES_SEG_NUM = 31,
+ CAM_ENTRIES_SEG_NUM = 32,
+ ROUTING_WORDS_SEG_NUM = 33,
+ ETS_SEG_NUM = 34,
+ PROBE_DUMP_SEG_NUM = 35,
+ ROUTING_INDEX_SEG_NUM = 36,
+ MAC_PROTOCOL_SEG_NUM = 37,
+ XAUI2_AN_SEG_NUM = 38,
+ XAUI2_HSS_PCS_SEG_NUM = 39,
+ XFI2_AN_SEG_NUM = 40,
+ XFI2_TRAIN_SEG_NUM = 41,
+ XFI2_HSS_PCS_SEG_NUM = 42,
+ XFI2_HSS_TX_SEG_NUM = 43,
+ XFI2_HSS_RX_SEG_NUM = 44,
+ XFI2_HSS_PLL_SEG_NUM = 45,
+ SEM_REGS_SEG_NUM = 50
+};
+
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT 64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT 14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
+#define XG_SERDES_XFI_AN_COUNT 14
+#define XG_SERDES_XFI_TRAIN_COUNT 12
+#define XG_SERDES_XFI_HSS_PCS_COUNT 15
+#define XG_SERDES_XFI_HSS_TX_COUNT 32
+#define XG_SERDES_XFI_HSS_RX_COUNT 32
+#define XG_SERDES_XFI_HSS_PLL_COUNT 32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT 10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
+ PRB_MX_ADDR_VALID_TOTAL)
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * The total is 48 entries of 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES 48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
+#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
+ RT_IDX_DUMP_WORDS_PER_ENTRY)
+/* There are 10 address blocks in filter, each with
+ * different entry counts and different word-count-per-entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+ ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+ (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+ (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+ (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
+#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
+ MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS 4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+#define SHADOW_REG_SHIFT 20
+
+struct ql_nic_misc {
+ u32 rx_ring_count;
+ u32 tx_ring_count;
+ u32 intr_count;
+ u32 function;
+};
+
+struct ql_reg_dump {
+
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[64];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_CPUS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[8+2];
+};
+
+struct ql_mpi_coredump {
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 1 */
+ struct mpi_coredump_segment_header core_regs_seg_hdr;
+ u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+ u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+ /* segment 2 */
+ struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+ u32 test_logic_regs[TEST_REGS_CNT];
+
+ /* segment 3 */
+ struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+ u32 rmii_regs[RMII_REGS_CNT];
+
+ /* segment 4 */
+ struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+ u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+ /* segment 5 */
+ struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+ u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+ /* segment 6 */
+ struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+ u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 7 */
+ struct mpi_coredump_segment_header ide_regs_seg_hdr;
+ u32 ide_regs[IDE_REGS_CNT];
+
+ /* segment 8 */
+ struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+ u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 9 */
+ struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+ u32 smbus_regs[SMBUS_REGS_CNT];
+
+ /* segment 10 */
+ struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+ u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 11 */
+ struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+ u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 12 */
+ struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+ u32 i2c_regs[I2C_REGS_CNT];
+ /* segment 13 */
+ struct mpi_coredump_segment_header memc_regs_seg_hdr;
+ u32 memc_regs[MEMC_REGS_CNT];
+
+ /* segment 14 */
+ struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+ u32 pbus_regs[PBUS_REGS_CNT];
+
+ /* segment 15 */
+ struct mpi_coredump_segment_header mde_regs_seg_hdr;
+ u32 mde_regs[MDE_REGS_CNT];
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 17 */
+ struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+ u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 18 */
+ struct mpi_coredump_segment_header xgmac1_seg_hdr;
+ u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 19 */
+ struct mpi_coredump_segment_header xgmac2_seg_hdr;
+ u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 20 */
+ struct mpi_coredump_segment_header code_ram_seg_hdr;
+ u32 code_ram[CODE_RAM_CNT];
+
+ /* segment 21 */
+ struct mpi_coredump_segment_header memc_ram_seg_hdr;
+ u32 memc_ram[MEMC_RAM_CNT];
+
+ /* segment 22 */
+ struct mpi_coredump_segment_header xaui_an_hdr;
+ u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 23 */
+ struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+ u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 24 */
+ struct mpi_coredump_segment_header xfi_an_hdr;
+ u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 25 */
+ struct mpi_coredump_segment_header xfi_train_hdr;
+ u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 26 */
+ struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+ u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 27 */
+ struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+ u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 28 */
+ struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+ u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 29 */
+ struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+ u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_RX_RINGS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+ /* segment 35 */
+ struct mpi_coredump_segment_header probe_dump_seg_hdr;
+ u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+ /* segment 36 */
+ struct mpi_coredump_segment_header routing_reg_seg_hdr;
+ u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+ /* segment 37 */
+ struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+ u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+ /* segment 38 */
+ struct mpi_coredump_segment_header xaui2_an_hdr;
+ u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 39 */
+ struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+ u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 40 */
+ struct mpi_coredump_segment_header xfi2_an_hdr;
+ u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 41 */
+ struct mpi_coredump_segment_header xfi2_train_hdr;
+ u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 42 */
+ struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+ u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 43 */
+ struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+ u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 44 */
+ struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+ u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 45 */
+ struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+ u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 50 */
+ /* semaphore registers, one per function */
+ struct mpi_coredump_segment_header sem_regs_seg_hdr;
+ u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
+/*
+ * intr_context structure is used during initialization
+ * to hook the interrupts. It is also used in a single
+ * irq environment as a context to the ISR.
+ */
+struct intr_context {
+ struct ql_adapter *qdev;
+ u32 intr;
+ u32 irq_mask; /* Mask of which rings the vector services. */
+ u32 hooked;
+ u32 intr_en_mask; /* value/mask used to enable this intr */
+ u32 intr_dis_mask; /* value/mask used to disable this intr */
+ u32 intr_read_mask; /* value/mask used to read this intr */
+ char name[IFNAMSIZ * 2];
+ atomic_t irq_cnt; /* irq_cnt is used in single vector
+ * environment. It's incremented for each
+ * irq handler that is scheduled. When each
+ * handler finishes it decrements irq_cnt and
+ * enables interrupts if it's zero. */
+ irq_handler_t handler;
+};
+
+/* adapter flags definitions. */
+enum {
+ QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
+ QL_LEGACY_ENABLED = 1,
+ QL_MSI_ENABLED = 2,
+ QL_MSIX_ENABLED = 3,
+ QL_DMA64 = 4,
+ QL_PROMISCUOUS = 5,
+ QL_ALLMULTI = 6,
+ QL_PORT_CFG = 7,
+ QL_CAM_RT_SET = 8,
+ QL_SELFTEST = 9,
+ QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
+ QL_ASIC_RECOVERY = 14, /* We are in ASIC recovery. */
+};
+
+/* link_status bit definitions */
+enum {
+ STS_LOOPBACK_MASK = 0x00000700,
+ STS_LOOPBACK_PCS = 0x00000100,
+ STS_LOOPBACK_HSS = 0x00000200,
+ STS_LOOPBACK_EXT = 0x00000300,
+ STS_PAUSE_MASK = 0x000000c0,
+ STS_PAUSE_STD = 0x00000040,
+ STS_PAUSE_PRI = 0x00000080,
+ STS_SPEED_MASK = 0x00000038,
+ STS_SPEED_100Mb = 0x00000000,
+ STS_SPEED_1Gb = 0x00000008,
+ STS_SPEED_10Gb = 0x00000010,
+ STS_LINK_TYPE_MASK = 0x00000007,
+ STS_LINK_TYPE_XFI = 0x00000001,
+ STS_LINK_TYPE_XAUI = 0x00000002,
+ STS_LINK_TYPE_XFI_BP = 0x00000003,
+ STS_LINK_TYPE_XAUI_BP = 0x00000004,
+ STS_LINK_TYPE_10GBASET = 0x00000005,
+};
+
+/* link_config bit definitions */
+enum {
+ CFG_JUMBO_FRAME_SIZE = 0x00010000,
+ CFG_PAUSE_MASK = 0x00000060,
+ CFG_PAUSE_STD = 0x00000020,
+ CFG_PAUSE_PRI = 0x00000040,
+ CFG_DCBX = 0x00000010,
+ CFG_LOOPBACK_MASK = 0x00000007,
+ CFG_LOOPBACK_PCS = 0x00000002,
+ CFG_LOOPBACK_HSS = 0x00000004,
+ CFG_LOOPBACK_EXT = 0x00000006,
+ CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
+};
+
+struct nic_operations {
+
+ int (*get_flash) (struct ql_adapter *);
+ int (*port_initialize) (struct ql_adapter *);
+};
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+ struct ricb ricb;
+ unsigned long flags;
+ u32 wol;
+
+ struct nic_stats nic_stats;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ /* Hardware information */
+ u32 chip_rev_id;
+ u32 fw_rev_id;
+ u32 func; /* PCI function for this adapter */
+ u32 alt_func; /* PCI function for alternate adapter */
+ u32 port; /* Port number this adapter */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+ spinlock_t stats_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ void __iomem *reg_base;
+ void __iomem *doorbell_area;
+ u32 doorbell_area_size;
+
+ u32 msg_enable;
+
+ /* Page for Shadow Registers */
+ void *rx_ring_shadow_reg_area;
+ dma_addr_t rx_ring_shadow_reg_dma;
+ void *tx_ring_shadow_reg_area;
+ dma_addr_t tx_ring_shadow_reg_dma;
+
+ u32 mailbox_in;
+ u32 mailbox_out;
+ struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
+
+ int tx_ring_size;
+ int rx_ring_size;
+ u32 intr_count;
+ struct msix_entry *msi_x_entry;
+ struct intr_context intr_context[MAX_RX_RINGS];
+
+ int tx_ring_count; /* One per online CPU. */
+ u32 rss_ring_count; /* One per irq vector. */
+ /*
+ * rx_ring_count =
+ * (CPU count * outbound completion rx_ring) +
+ * (irq_vector_cnt * inbound (RSS) completion rx_ring)
+ */
+ int rx_ring_count;
+ int ring_mem_size;
+ void *ring_mem;
+
+ struct rx_ring rx_ring[MAX_RX_RINGS];
+ struct tx_ring tx_ring[MAX_TX_RINGS];
+ unsigned int lbq_buf_order;
+
+ int rx_csum;
+ u32 default_rx_queue;
+
+ u16 rx_coalesce_usecs; /* cqicb->int_delay */
+ u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
+ u16 tx_coalesce_usecs; /* cqicb->int_delay */
+ u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
+
+ u32 xg_sem_mask;
+ u32 port_link_up;
+ u32 port_init;
+ u32 link_status;
+ struct ql_mpi_coredump *mpi_coredump;
+ u32 core_is_dumped;
+ u32 link_config;
+ u32 led_config;
+ u32 max_frame_size;
+
+ union flash_params flash;
+
+ struct workqueue_struct *workqueue;
+ struct delayed_work asic_reset_work;
+ struct delayed_work mpi_reset_work;
+ struct delayed_work mpi_work;
+ struct delayed_work mpi_port_cfg_work;
+ struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
+ struct completion ide_completion;
+ const struct nic_operations *nic_ops;
+ u16 device_id;
+ struct timer_list timer;
+ atomic_t lb_count;
+ /* Keep local copy of current mac address. */
+ char current_mac_addr[ETH_ALEN];
+};
+
+/*
+ * Typical register accessor for a memory-mapped device.
+ */
+static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+{
+ return readl(qdev->reg_base + reg);
+}
+
+/*
+ * Typical register accessor for a memory-mapped device.
+ */
+static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+{
+ writel(val, qdev->reg_base + reg);
+}
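+
+/* Usage sketch (hypothetical helper): the accessors pair with the control
+ * register set map above, e.g. reading the System Status register; the
+ * driver simply calls ql_read32(qdev, STS) where needed.
+ */
+static inline u32 ql_sketch_read_status(const struct ql_adapter *qdev)
+{
+ return ql_read32(qdev, STS);
+}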
+
+/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization. The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues. For
+ * example, the producer index for an outbound queue. Each queue uses
+ * one 4 KB chunk of memory. The lower half of the space is for outbound
+ * queues. The upper half is for inbound queues.
+ */
+static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+ mmiowb();
+}
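+
+/* Usage sketch (hypothetical helper): after queueing TX descriptors, the
+ * driver advances the software producer index and rings the doorbell so
+ * the chip begins fetching the new entries.
+ */
+static inline void ql_sketch_ring_tx_db(struct tx_ring *tx_ring)
+{
+ ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+}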
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32 *addr)
+{
+ u32 reg;
+ reg = le32_to_cpu(*addr);
+ rmb();
+ return reg;
+}
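+
+/* Usage sketch (hypothetical helper): a completion-queue service loop can
+ * poll the shadowed producer index instead of issuing a PCI read, then
+ * compare it with the software consumer index to see if work is pending.
+ */
+static inline int ql_sketch_cq_has_work(struct rx_ring *rx_ring)
+{
+ return ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx;
+}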
+
+extern char qlge_driver_name[];
+extern const char qlge_driver_version[];
+extern const struct ethtool_ops qlge_ethtool_ops;
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
+void ql_queue_fw_error(struct ql_adapter *qdev);
+void ql_mpi_work(struct work_struct *work);
+void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void ql_queue_asic_error(struct ql_adapter *qdev);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+void ql_set_ethtool_ops(struct net_device *ndev);
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+void ql_mpi_idc_work(struct work_struct *work);
+void ql_mpi_port_cfg_work(struct work_struct *work);
+int ql_mb_get_fw_state(struct ql_adapter *qdev);
+int ql_cam_route_initialize(struct ql_adapter *qdev);
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+ int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
+void ql_link_on(struct ql_adapter *qdev);
+void ql_link_off(struct ql_adapter *qdev);
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
+int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
+void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
+
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
+/* #define QL_IB_DUMP */
+/* #define QL_OB_DUMP */
+
+#ifdef QL_REG_DUMP
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
+#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
+#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
+#else
+#define QL_DUMP_REGS(qdev)
+#define QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+ ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
+
+#endif /* _QLGE_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
new file mode 100644
index 000000000..829be21f9
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -0,0 +1,2042 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "qlge.h"
+
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register on the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+ u32 reg, u32 reg_val)
+{
+ u32 register_to_write;
+ int status = 0;
+
+ register_to_write = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_write_mpi_reg(qdev, register_to_write, reg_val);
+
+ return status;
+}
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = 10;
+
+ while (count) {
+ temp = ql_read_other_func_reg(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit)
+ return -1;
+ else if (temp & bit)
+ return 0;
+ mdelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+ return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+ return status;
+}
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ unsigned int direct_valid, unsigned int indirect_valid)
+{
+ unsigned int status;
+
+ status = 1;
+ if (direct_valid)
+ status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *direct_ptr = 0xDEADBEEF;
+
+ status = 1;
+ if (indirect_valid)
+ status = ql_read_other_func_serdes_reg(
+ qdev, addr, indirect_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *indirect_ptr = 0xDEADBEEF;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+ unsigned int xaui_indirect_valid, i;
+ u32 *direct_ptr, temp;
+ u32 *indirect_ptr;
+
+ xfi_direct_valid = xfi_indirect_valid = 0;
+ xaui_direct_valid = xaui_indirect_valid = 1;
+
+ /* The XAUI needs to be read out per port */
+ if (qdev->func & 1) {
+ /* We are NIC 2 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ } else {
+ /* We are NIC 1 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ }
+
+ /*
+ * The XFI register is shared, so we only need to read it
+ * from one function and then check the bits.
+ */
+ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ if (status)
+ temp = 0;
+
+ if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+ XG_SERDES_ADDR_XFI1_PWR_UP) {
+ /* Now see whether we are NIC 1 or NIC 2. */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_indirect_valid = 1;
+ else
+ xfi_direct_valid = 1;
+ }
+ if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+ XG_SERDES_ADDR_XFI2_PWR_UP) {
+ /* Now see whether we are NIC 1 or NIC 2. */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_direct_valid = 1;
+ else
+ xfi_indirect_valid = 1;
+ }
+
+ /* Get XAUI_AN register block. */
+ if (qdev->func & 1) {
+ /* Function 2 is direct */
+ direct_ptr = mpi_coredump->serdes2_xaui_an;
+ indirect_ptr = mpi_coredump->serdes_xaui_an;
+ } else {
+ /* Function 1 is direct */
+ direct_ptr = mpi_coredump->serdes_xaui_an;
+ indirect_ptr = mpi_coredump->serdes2_xaui_an;
+ }
+
+ for (i = 0; i <= 0x00000034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ }
+
+ for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_XFI_AN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_an;
+ indirect_ptr = mpi_coredump->serdes_xfi_an;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_an;
+ indirect_ptr = mpi_coredump->serdes2_xfi_an;
+ }
+
+ for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_TRAIN register block. */
+ if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_train;
+		indirect_ptr = mpi_coredump->serdes_xfi_train;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_train;
+		indirect_ptr = mpi_coredump->serdes2_xfi_train;
+ }
+
+ for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_pcs;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_pcs;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_pcs;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_pcs;
+ }
+
+ for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_TX register block. */
+ if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_tx;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_tx;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_tx;
+ }
+ for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_RX register block. */
+ if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_rx;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_rx;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_rx;
+ }
+
+ for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PLL register block. */
+ if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_pll;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_pll;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_pll;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_pll;
+ }
+ for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+ return 0;
+}
+
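+/*
+ * Read an XGMAC register belonging to the other NIC function. The
+ * handshake mirrors the direct-path read: wait for the address
+ * register to go ready, post the address with the read bit set,
+ * wait for ready again, then fetch the result from the data
+ * register.
+ */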
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status = 0;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+ return status;
+}
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
+ unsigned int other_function)
+{
+ int status = 0;
+ int i;
+
+ for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+		/* We're reading 400 xgmac registers, but we filter out
+		 * several locations that are non-responsive to reads.
+		 */
+ if ((i == 0x00000114) ||
+ (i == 0x00000118) ||
+ (i == 0x0000013c) ||
+ (i == 0x00000140) ||
+ (i > 0x00000150 && i < 0x000001fc) ||
+ (i > 0x00000278 && i < 0x000002a0) ||
+ (i > 0x000002c0 && i < 0x000002cf) ||
+ (i > 0x000002dc && i < 0x000002f0) ||
+ (i > 0x000003c8 && i < 0x00000400) ||
+ (i > 0x00000400 && i < 0x00000410) ||
+ (i > 0x00000410 && i < 0x00000420) ||
+ (i > 0x00000420 && i < 0x00000430) ||
+ (i > 0x00000430 && i < 0x00000440) ||
+ (i > 0x00000440 && i < 0x00000450) ||
+ (i > 0x00000450 && i < 0x00000500) ||
+ (i > 0x0000054c && i < 0x00000568) ||
+ (i > 0x000005c8 && i < 0x00000600)) {
+ if (other_function)
+ status =
+ ql_read_other_func_xgmac_reg(qdev, i, buf);
+ else
+ status = ql_read_xgmac_reg(qdev, i, buf);
+
+ if (status)
+ *buf = 0xdeadbeef;
+ break;
+ }
+ }
+ return status;
+}
+
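+/*
+ * Dump the NIC and CNA ETS registers. Each write selects a queue
+ * index in the top three bits; bit 27 (0x08000000) appears to act
+ * as the read strobe, after which the register is simply read back.
+ */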
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ int status = 0;
+ int i;
+
+ for (i = 0; i < 8; i++, buf++) {
+ ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, NIC_ETS);
+ }
+
+ for (i = 0; i < 2; i++, buf++) {
+ ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, CNA_ETS);
+ }
+
+ return status;
+}
+
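+/*
+ * Snapshot the interrupt state of every rx ring. Writing a vector's
+ * read mask to INTR_EN makes the following read of INTR_EN report
+ * that vector's enable state instead of being a plain read.
+ */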
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+ ql_write32(qdev, INTR_EN,
+ qdev->intr_context[i].intr_read_mask);
+ *buf = ql_read32(qdev, INTR_EN);
+ }
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int i, status;
+ u32 value[3];
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower MAC address */
+ *buf++ = value[1]; /* upper MAC address */
+ *buf++ = value[2]; /* output */
+ }
+ for (i = 0; i < 32; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower Mcast address */
+ *buf++ = value[1]; /* upper Mcast address */
+ }
+err:
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 value, i;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_routing_reg(qdev, i, &value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register\n");
+ goto err;
+ } else {
+ *buf++ = value;
+ }
+ }
+err:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read the MPI Processor shadow registers */
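+/*
+ * Each shadow register is reached through a window: the shadow
+ * index is latched by writing RISC_124, and the selected value is
+ * then read back through RISC_127.
+ */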
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 i;
+ int status;
+
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
+ u32 offset, u32 count)
+{
+ int i, status = 0;
+ for (i = 0; i < count; i++, buf++) {
+ status = ql_read_mpi_reg(qdev, offset + i, buf);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/* Read the ASIC probe dump */
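+/*
+ * For each module enabled in the 'valid' mask, sweep every mux
+ * select. The output stream per module is the probe control word
+ * (emitted for the first mux select only) followed by the low and
+ * high 32-bit samples for each mux select.
+ */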
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
+{
+ u32 module, mux_sel, probe, lo_val, hi_val;
+
+ for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+ if (!((valid >> module) & 1))
+ continue;
+ for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+ probe = clock
+ | PRB_MX_ADDR_ARE
+ | mux_sel
+ | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = ql_read32(qdev, PRB_MX_DATA);
+ if (mux_sel == 0) {
+ *buf = probe;
+ buf++;
+ }
+ probe |= PRB_MX_ADDR_UP;
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ hi_val = ql_read32(qdev, PRB_MX_DATA);
+ *buf = lo_val;
+ buf++;
+ *buf = hi_val;
+ buf++;
+ }
+ }
+ return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+ /* First we have to enable the probe mux */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
+ return 0;
+}
+
+/* Read out the routing index registers */
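+/*
+ * Each dumped entry is four words: the type, the index, the RT_IDX
+ * read-back (polled until its RT_IDX_MR bit is set) and the RT_DATA
+ * value. Types 0 and 1 have eight slots each; types 2 and 3 have
+ * sixteen.
+ */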
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 type, index, index_max;
+ u32 result_index;
+ u32 result_data;
+ u32 val;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (type = 0; type < 4; type++) {
+ if (type < 2)
+ index_max = 8;
+ else
+ index_max = 16;
+ for (index = 0; index < index_max; index++) {
+ val = RT_IDX_RS
+ | (type << RT_IDX_TYPE_SHIFT)
+ | (index << RT_IDX_IDX_SHIFT);
+ ql_write32(qdev, RT_IDX, val);
+ result_index = 0;
+ while ((result_index & RT_IDX_MR) == 0)
+ result_index = ql_read32(qdev, RT_IDX);
+ result_data = ql_read32(qdev, RT_DATA);
+ *buf = type;
+ buf++;
+ *buf = index;
+ buf++;
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read out the MAC protocol registers */
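+/*
+ * Every entry type is swept index by index and word by word; each
+ * probe emits a (MAC_ADDR_IDX read-back, MAC_ADDR_DATA) pair once
+ * MAC_ADDR_MR sets, so the destination buffer holds two words per
+ * offset.
+ */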
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 result_index, result_data;
+ u32 type;
+ u32 index;
+ u32 offset;
+ u32 val;
+ u32 initial_val = MAC_ADDR_RS;
+ u32 max_index;
+ u32 max_offset;
+
+ for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val |= MAC_ADDR_ADR;
+ max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 1: /* Multicast MAC Address */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 4: /* FC MAC addresses */
+ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+ break;
+ case 5: /* Mgmt MAC addresses */
+ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+ break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+ pr_err("Bad type!!! 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ }
+}
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+	for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+		/* If the read failed then dead-fill the element. */
+		if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
+
+/* Create a coredump segment header */
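+/*
+ * Note that seg_size, as the callers below use it, covers both this
+ * header and the segment payload, and the description is truncated
+ * so the field stays NUL-terminated after the preceding memset.
+ */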
+static void ql_build_coredump_seg_header(
+ struct mpi_coredump_segment_header *seg_hdr,
+ u32 seg_number, u32 seg_size, u8 *desc)
+{
+ memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->segNum = seg_number;
+ seg_hdr->segSize = seg_size;
+ memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
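+/*
+ * The sequence is roughly: pause the MPI RISC, snapshot the NIC,
+ * XGMAC, serdes and MPI register blocks into their segments, block
+ * MPI restarts, then unpause and hard-reset the RISC so the WCS and
+ * MEMC RAM areas can be dumped last.
+ */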
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+ return -EINVAL;
+ }
+
+	/* Try to get the spinlock, but don't worry if
+ * it isn't available. If the firmware died it
+ * might be holding the sem.
+ */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the global header */
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_mpi_coredump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* Get generic NIC reg dump */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+ /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+ if (qdev->func & 1) {
+ /* Odd means our function is NIC 2 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ } else {
+ /* Even means our function is NIC 1 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ }
+
+ /* Rev C. Step 20a */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
+
+ /* Rev C. Step 20b */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = ql_get_serdes_regs(qdev, mpi_coredump);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
+
+ /* Get the MPI Core Registers */
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ if (status)
+ goto err;
+ /* Get the 16 MPI shadow registers */
+ status = ql_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
+ if (status)
+ goto err;
+
+ /* Get the Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC2 Registers */
+
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
+
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the I2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ goto err;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = ql_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+ /* Get the semaphore registers for all 5 functions */
+ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+ ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+	/* Prevent the MPI from restarting while we dump the memory. */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+ /* clear the pause */
+ status = ql_unpause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Reset the RISC so we can dump RAM */
+ status = ql_hard_reset_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ /* Insert the segment header */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+			  "Force Coredump can only be done from an interface that is up\n");
+ return;
+ }
+ ql_queue_fw_error(qdev);
+}
+
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
+{
+ int i, status;
+
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_reg_dump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* segment 16 */
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 16, Rev C. Step 18 */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_regs),
+ "NIC Registers");
+ /* Get generic reg dump */
+ for (i = 0; i < 64; i++)
+ mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ return;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ return;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+	/*
+	 * If force coredump is not set, take a complete coredump
+	 * straight into the user's buffer. If force coredump is set,
+	 * snapshot just the general registers into the user's buffer
+	 * and queue the firmware error handler, which spools the full
+	 * core to the log file.
+	 */
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+ if (!ql_core_dump(qdev, buff))
+ ql_soft_reset_mpi_risc(qdev);
+ else
+ netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+ } else {
+ ql_gen_reg_dump(qdev, buff);
+ ql_get_core_dump(qdev);
+ }
+}
+
+/* Coredump to messages log file using separate worker thread */
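+/*
+ * The dump is emitted eight words per line with a short sleep
+ * between lines, presumably to keep from flooding the console while
+ * the whole coredump image is written out.
+ */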
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_core_to_log.work);
+ u32 *tmp, count;
+ int i;
+
+ count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+ tmp = (u32 *)qdev->mpi_coredump;
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Core is dumping to log file!\n");
+
+ for (i = 0; i < count; i += 8) {
+ pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
+ "%.08x %.08x %.08x\n", i,
+ tmp[i + 0],
+ tmp[i + 1],
+ tmp[i + 2],
+ tmp[i + 3],
+ tmp[i + 4],
+ tmp[i + 5],
+ tmp[i + 6],
+ tmp[i + 7]);
+ msleep(5);
+ }
+}
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+ int i;
+ u32 value;
+ for (i = 0; i < qdev->intr_count; i++) {
+ ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
+ value = ql_read32(qdev, INTR_EN);
+ pr_err("%s: Interrupt %d is %s\n",
+ qdev->ndev->name, i,
+ (value & INTR_EN_EN ? "enabled" : "disabled"));
+ }
+}
+
+#define DUMP_XGMAC(qdev, reg) \
+do { \
+ u32 data; \
+ ql_read_xgmac_reg(qdev, reg, &data); \
+ pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+} while (0)
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+ pr_err("%s: Couldn't get xgmac sem\n", __func__);
+ return;
+ }
+ DUMP_XGMAC(qdev, PAUSE_SRC_LO);
+ DUMP_XGMAC(qdev, PAUSE_SRC_HI);
+ DUMP_XGMAC(qdev, GLOBAL_CFG);
+ DUMP_XGMAC(qdev, TX_CFG);
+ DUMP_XGMAC(qdev, RX_CFG);
+ DUMP_XGMAC(qdev, FLOW_CTL);
+ DUMP_XGMAC(qdev, PAUSE_OPCODE);
+ DUMP_XGMAC(qdev, PAUSE_TIMER);
+ DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
+ DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
+ DUMP_XGMAC(qdev, MAC_TX_PARAMS);
+ DUMP_XGMAC(qdev, MAC_RX_PARAMS);
+ DUMP_XGMAC(qdev, MAC_SYS_INT);
+ DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
+ DUMP_XGMAC(qdev, MAC_MGMT_INT);
+ DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
+ DUMP_XGMAC(qdev, EXT_ARB_MODE);
+ ql_sem_unlock(qdev, qdev->xg_sem_mask);
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+ int i;
+ u32 value[3];
+
+ i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (i)
+ return;
+ for (i = 0; i < 4; i++) {
+ if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
+ pr_err("%s: Failed read of mac index register\n",
+ __func__);
+			ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+			return;
+ } else {
+ if (value[0])
+ pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
+ qdev->ndev->name, i, value[1], value[0],
+ value[2]);
+ }
+ }
+ for (i = 0; i < 32; i++) {
+ if (ql_get_mac_addr_reg
+ (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+ pr_err("%s: Failed read of mac index register\n",
+ __func__);
+			ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+			return;
+ } else {
+ if (value[0])
+ pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
+ qdev->ndev->name, i, value[1], value[0]);
+ }
+ }
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+ int i;
+ u32 value;
+ i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (i)
+ return;
+ for (i = 0; i < 16; i++) {
+ value = 0;
+ if (ql_get_routing_reg(qdev, i, &value)) {
+ pr_err("%s: Failed read of routing index register\n",
+ __func__);
+			ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+			return;
+ } else {
+ if (value)
+ pr_err("%s: Routing Mask %d = 0x%.08x\n",
+ qdev->ndev->name, i, value);
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+#define DUMP_REG(qdev, reg) \
+ pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
+
+void ql_dump_regs(struct ql_adapter *qdev)
+{
+ pr_err("reg dump for function #%d\n", qdev->func);
+ DUMP_REG(qdev, SYS);
+ DUMP_REG(qdev, RST_FO);
+ DUMP_REG(qdev, FSC);
+ DUMP_REG(qdev, CSR);
+ DUMP_REG(qdev, ICB_RID);
+ DUMP_REG(qdev, ICB_L);
+ DUMP_REG(qdev, ICB_H);
+ DUMP_REG(qdev, CFG);
+ DUMP_REG(qdev, BIOS_ADDR);
+ DUMP_REG(qdev, STS);
+ DUMP_REG(qdev, INTR_EN);
+ DUMP_REG(qdev, INTR_MASK);
+ DUMP_REG(qdev, ISR1);
+ DUMP_REG(qdev, ISR2);
+ DUMP_REG(qdev, ISR3);
+ DUMP_REG(qdev, ISR4);
+ DUMP_REG(qdev, REV_ID);
+ DUMP_REG(qdev, FRC_ECC_ERR);
+ DUMP_REG(qdev, ERR_STS);
+ DUMP_REG(qdev, RAM_DBG_ADDR);
+ DUMP_REG(qdev, RAM_DBG_DATA);
+ DUMP_REG(qdev, ECC_ERR_CNT);
+ DUMP_REG(qdev, SEM);
+ DUMP_REG(qdev, GPIO_1);
+ DUMP_REG(qdev, GPIO_2);
+ DUMP_REG(qdev, GPIO_3);
+ DUMP_REG(qdev, XGMAC_ADDR);
+ DUMP_REG(qdev, XGMAC_DATA);
+ DUMP_REG(qdev, NIC_ETS);
+ DUMP_REG(qdev, CNA_ETS);
+ DUMP_REG(qdev, FLASH_ADDR);
+ DUMP_REG(qdev, FLASH_DATA);
+ DUMP_REG(qdev, CQ_STOP);
+ DUMP_REG(qdev, PAGE_TBL_RID);
+ DUMP_REG(qdev, WQ_PAGE_TBL_LO);
+ DUMP_REG(qdev, WQ_PAGE_TBL_HI);
+ DUMP_REG(qdev, CQ_PAGE_TBL_LO);
+ DUMP_REG(qdev, CQ_PAGE_TBL_HI);
+ DUMP_REG(qdev, COS_DFLT_CQ1);
+ DUMP_REG(qdev, COS_DFLT_CQ2);
+ DUMP_REG(qdev, SPLT_HDR);
+ DUMP_REG(qdev, FC_PAUSE_THRES);
+ DUMP_REG(qdev, NIC_PAUSE_THRES);
+ DUMP_REG(qdev, FC_ETHERTYPE);
+ DUMP_REG(qdev, FC_RCV_CFG);
+ DUMP_REG(qdev, NIC_RCV_CFG);
+ DUMP_REG(qdev, FC_COS_TAGS);
+ DUMP_REG(qdev, NIC_COS_TAGS);
+ DUMP_REG(qdev, MGMT_RCV_CFG);
+ DUMP_REG(qdev, XG_SERDES_ADDR);
+ DUMP_REG(qdev, XG_SERDES_DATA);
+ DUMP_REG(qdev, PRB_MX_ADDR);
+ DUMP_REG(qdev, PRB_MX_DATA);
+ ql_dump_intr_states(qdev);
+ ql_dump_xgmac_control_regs(qdev);
+ ql_dump_ets_regs(qdev);
+ ql_dump_cam_entries(qdev);
+ ql_dump_routing_entries(qdev);
+}
+#endif
+
+#ifdef QL_STAT_DUMP
+
+#define DUMP_STAT(qdev, stat) \
+ pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
+
+void ql_dump_stat(struct ql_adapter *qdev)
+{
+ pr_err("%s: Enter\n", __func__);
+ DUMP_STAT(qdev, tx_pkts);
+ DUMP_STAT(qdev, tx_bytes);
+ DUMP_STAT(qdev, tx_mcast_pkts);
+ DUMP_STAT(qdev, tx_bcast_pkts);
+ DUMP_STAT(qdev, tx_ucast_pkts);
+ DUMP_STAT(qdev, tx_ctl_pkts);
+ DUMP_STAT(qdev, tx_pause_pkts);
+ DUMP_STAT(qdev, tx_64_pkt);
+ DUMP_STAT(qdev, tx_65_to_127_pkt);
+ DUMP_STAT(qdev, tx_128_to_255_pkt);
+ DUMP_STAT(qdev, tx_256_511_pkt);
+ DUMP_STAT(qdev, tx_512_to_1023_pkt);
+ DUMP_STAT(qdev, tx_1024_to_1518_pkt);
+ DUMP_STAT(qdev, tx_1519_to_max_pkt);
+ DUMP_STAT(qdev, tx_undersize_pkt);
+ DUMP_STAT(qdev, tx_oversize_pkt);
+ DUMP_STAT(qdev, rx_bytes);
+ DUMP_STAT(qdev, rx_bytes_ok);
+ DUMP_STAT(qdev, rx_pkts);
+ DUMP_STAT(qdev, rx_pkts_ok);
+ DUMP_STAT(qdev, rx_bcast_pkts);
+ DUMP_STAT(qdev, rx_mcast_pkts);
+ DUMP_STAT(qdev, rx_ucast_pkts);
+ DUMP_STAT(qdev, rx_undersize_pkts);
+ DUMP_STAT(qdev, rx_oversize_pkts);
+ DUMP_STAT(qdev, rx_jabber_pkts);
+ DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
+ DUMP_STAT(qdev, rx_drop_events);
+ DUMP_STAT(qdev, rx_fcerr_pkts);
+ DUMP_STAT(qdev, rx_align_err);
+ DUMP_STAT(qdev, rx_symbol_err);
+ DUMP_STAT(qdev, rx_mac_err);
+ DUMP_STAT(qdev, rx_ctl_pkts);
+ DUMP_STAT(qdev, rx_pause_pkts);
+ DUMP_STAT(qdev, rx_64_pkts);
+ DUMP_STAT(qdev, rx_65_to_127_pkts);
+ DUMP_STAT(qdev, rx_128_255_pkts);
+ DUMP_STAT(qdev, rx_256_511_pkts);
+ DUMP_STAT(qdev, rx_512_to_1023_pkts);
+ DUMP_STAT(qdev, rx_1024_to_1518_pkts);
+ DUMP_STAT(qdev, rx_1519_to_max_pkts);
+ DUMP_STAT(qdev, rx_len_err_pkts);
+}
+#endif
+
+#ifdef QL_DEV_DUMP
+
+#define DUMP_QDEV_FIELD(qdev, type, field) \
+ pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
+#define DUMP_QDEV_DMA_FIELD(qdev, field) \
+ pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
+#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
+ pr_err("%s[%d].%s = " type "\n", \
+	       #array, index, #field, qdev->array[index].field)
+void ql_dump_qdev(struct ql_adapter *qdev)
+{
+ int i;
+ DUMP_QDEV_FIELD(qdev, "%lx", flags);
+ DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
+ DUMP_QDEV_FIELD(qdev, "%p", pdev);
+ DUMP_QDEV_FIELD(qdev, "%p", ndev);
+ DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
+ DUMP_QDEV_FIELD(qdev, "%p", reg_base);
+ DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
+ DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
+ DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
+ DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
+ DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
+ DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
+ DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
+ DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+ if (qdev->msi_x_entry)
+ for (i = 0; i < qdev->intr_count; i++) {
+ DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
+ DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
+ }
+ for (i = 0; i < qdev->intr_count; i++) {
+ DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
+ DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
+ DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
+ DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
+ DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
+ DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
+ }
+ DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
+ DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
+ DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
+ DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
+ DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+ DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
+ DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
+ DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
+ DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
+ DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
+ DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
+ DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
+}
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb)
+{
+ pr_err("Dumping wqicb stuff...\n");
+ pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+ pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
+ pr_err("wqicb->cq_id_rss = %d\n",
+ le16_to_cpu(wqicb->cq_id_rss));
+ pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
+ pr_err("wqicb->wq_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(wqicb->addr));
+ pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
+}
+
+void ql_dump_tx_ring(struct tx_ring *tx_ring)
+{
+ if (tx_ring == NULL)
+ return;
+ pr_err("===================== Dumping tx_ring %d ===============\n",
+ tx_ring->wq_id);
+ pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
+ pr_err("tx_ring->base_dma = 0x%llx\n",
+ (unsigned long long) tx_ring->wq_base_dma);
+ pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
+ tx_ring->cnsmr_idx_sh_reg,
+ tx_ring->cnsmr_idx_sh_reg
+ ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
+ pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
+ pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
+ pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
+ pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
+ pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
+ pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
+ pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
+ pr_err("tx_ring->q = %p\n", tx_ring->q);
+ pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
+}
+
+void ql_dump_ricb(struct ricb *ricb)
+{
+ int i;
+ pr_err("===================== Dumping ricb ===============\n");
+ pr_err("Dumping ricb stuff...\n");
+
+ pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
+ pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
+ ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+ ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+ ricb->flags & RSS_LI ? "RSS_LI " : "",
+ ricb->flags & RSS_LB ? "RSS_LB " : "",
+ ricb->flags & RSS_LM ? "RSS_LM " : "",
+ ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+ ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+ ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+ ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+ pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
+ for (i = 0; i < 16; i++)
+ pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->hash_cq_id[i]));
+ for (i = 0; i < 10; i++)
+ pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->ipv6_hash_key[i]));
+ for (i = 0; i < 4; i++)
+ pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->ipv4_hash_key[i]));
+}
+
+void ql_dump_cqicb(struct cqicb *cqicb)
+{
+ pr_err("Dumping cqicb stuff...\n");
+
+ pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
+ pr_err("cqicb->flags = %x\n", cqicb->flags);
+ pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
+ pr_err("cqicb->addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->addr));
+ pr_err("cqicb->prod_idx_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
+ pr_err("cqicb->pkt_delay = 0x%.04x\n",
+ le16_to_cpu(cqicb->pkt_delay));
+ pr_err("cqicb->irq_delay = 0x%.04x\n",
+ le16_to_cpu(cqicb->irq_delay));
+ pr_err("cqicb->lbq_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
+ pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
+ le16_to_cpu(cqicb->lbq_buf_size));
+ pr_err("cqicb->lbq_len = 0x%.04x\n",
+ le16_to_cpu(cqicb->lbq_len));
+ pr_err("cqicb->sbq_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
+ pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
+ le16_to_cpu(cqicb->sbq_buf_size));
+ pr_err("cqicb->sbq_len = 0x%.04x\n",
+ le16_to_cpu(cqicb->sbq_len));
+}
+
+void ql_dump_rx_ring(struct rx_ring *rx_ring)
+{
+ if (rx_ring == NULL)
+ return;
+ pr_err("===================== Dumping rx_ring %d ===============\n",
+ rx_ring->cq_id);
+ pr_err("Dumping rx_ring %d, type = %s%s%s\n",
+ rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
+ rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
+ rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+ pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
+ pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
+ pr_err("rx_ring->cq_base_dma = %llx\n",
+ (unsigned long long) rx_ring->cq_base_dma);
+ pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
+ pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
+ pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
+ rx_ring->prod_idx_sh_reg,
+ rx_ring->prod_idx_sh_reg
+ ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
+ pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
+ (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
+ pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
+ rx_ring->cnsmr_idx_db_reg);
+ pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
+ pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
+ pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
+
+ pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
+ pr_err("rx_ring->lbq_base_dma = %llx\n",
+ (unsigned long long) rx_ring->lbq_base_dma);
+ pr_err("rx_ring->lbq_base_indirect = %p\n",
+ rx_ring->lbq_base_indirect);
+ pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
+ (unsigned long long) rx_ring->lbq_base_indirect_dma);
+ pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
+ pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
+ pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
+ pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
+ rx_ring->lbq_prod_idx_db_reg);
+ pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
+ pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+ pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
+ pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
+ pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
+
+ pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
+ pr_err("rx_ring->sbq_base_dma = %llx\n",
+ (unsigned long long) rx_ring->sbq_base_dma);
+ pr_err("rx_ring->sbq_base_indirect = %p\n",
+ rx_ring->sbq_base_indirect);
+ pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
+ (unsigned long long) rx_ring->sbq_base_indirect_dma);
+ pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
+ pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
+ pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
+ pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
+ rx_ring->sbq_prod_idx_db_reg);
+ pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
+ pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
+ pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
+ pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
+ pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+ pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
+ pr_err("rx_ring->irq = %d\n", rx_ring->irq);
+ pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
+ pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
+}
+
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
+{
+ void *ptr;
+
+ pr_err("%s: Enter\n", __func__);
+
+ ptr = kmalloc(size, GFP_ATOMIC);
+ if (ptr == NULL)
+ return;
+
+ if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
+ pr_err("%s: Failed to upload control block!\n", __func__);
+ goto fail_it;
+ }
+ switch (bit) {
+ case CFG_DRQ:
+ ql_dump_wqicb((struct wqicb *)ptr);
+ break;
+ case CFG_DCQ:
+ ql_dump_cqicb((struct cqicb *)ptr);
+ break;
+ case CFG_DR:
+ ql_dump_ricb((struct ricb *)ptr);
+ break;
+ default:
+ pr_err("%s: Invalid bit value = %x\n", __func__, bit);
+ break;
+ }
+fail_it:
+ kfree(ptr);
+}
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+{
+	int i;
+
+	/* Dump the first three buffer descriptors of the IOCB. */
+	for (i = 0; i < 3; i++, tbd++) {
+		pr_err("tbd->addr = 0x%llx\n",
+		       le64_to_cpu((u64) tbd->addr));
+		pr_err("tbd->len = %d\n",
+		       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+		pr_err("tbd->flags = %s %s\n",
+		       tbd->len & TX_DESC_C ? "C" : ".",
+		       tbd->len & TX_DESC_E ? "E" : ".");
+	}
+}
+
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+{
+ struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+ (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+ struct tx_buf_desc *tbd;
+ u16 frame_len;
+
+ pr_err("%s\n", __func__);
+ pr_err("opcode = %s\n",
+ (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+ pr_err("flags1 = %s %s %s %s %s\n",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+ pr_err("flags2 = %s %s %s\n",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+ pr_err("flags3 = %s %s %s\n",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+ pr_err("tid = %x\n", ob_mac_iocb->tid);
+ pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
+ pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
+ if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
+ pr_err("frame_len = %d\n",
+ le32_to_cpu(ob_mac_tso_iocb->frame_len));
+ pr_err("mss = %d\n",
+ le16_to_cpu(ob_mac_tso_iocb->mss));
+ pr_err("prot_hdr_len = %d\n",
+ le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+ pr_err("hdr_offset = 0x%.04x\n",
+ le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+ frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
+ } else {
+ pr_err("frame_len = %d\n",
+ le16_to_cpu(ob_mac_iocb->frame_len));
+ frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+ }
+ tbd = &ob_mac_iocb->tbd[0];
+ ql_dump_tx_desc(tbd);
+}
+
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+{
+ pr_err("%s\n", __func__);
+ pr_err("opcode = %d\n", ob_mac_rsp->opcode);
+ pr_err("flags = %s %s %s %s %s %s %s\n",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+ ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+ pr_err("tid = %x\n", ob_mac_rsp->tid);
+}
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ pr_err("%s\n", __func__);
+ pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
+ pr_err("flags1 = %s%s%s%s%s%s\n",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
+ pr_err("%s%s%s Multicast\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+ pr_err("flags2 = %s%s%s%s%s\n",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
+ pr_err("%s%s%s%s%s error\n",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+ pr_err("flags3 = %s%s\n",
+ ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+ ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+ pr_err("RSS flags = %s%s%s%s\n",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+ pr_err("data_len = %d\n",
+ le32_to_cpu(ib_mac_rsp->data_len));
+ pr_err("data_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+ pr_err("rss = %x\n",
+ le32_to_cpu(ib_mac_rsp->rss));
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
+ pr_err("vlan_id = %x\n",
+ le16_to_cpu(ib_mac_rsp->vlan_id));
+
+ pr_err("flags4 = %s%s%s\n",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ pr_err("hdr length = %d\n",
+ le32_to_cpu(ib_mac_rsp->hdr_len));
+ pr_err("hdr addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
+ }
+}
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev)
+{
+ int i;
+
+ QL_DUMP_REGS(qdev);
+ QL_DUMP_QDEV(qdev);
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ QL_DUMP_TX_RING(&qdev->tx_ring[i]);
+ QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
+ }
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ QL_DUMP_RX_RING(&qdev->rx_ring[i]);
+ QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
+ }
+}
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
new file mode 100644
index 000000000..c3c514e33
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -0,0 +1,735 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include "qlge.h"
+
+struct ql_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_OFF(m) offsetof(struct ql_adapter, m)
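+
+/*
+ * Each table entry pairs an ethtool string with the size and offset
+ * of the matching counter inside struct ql_adapter, so the stats
+ * handlers can walk the table generically rather than copying each
+ * field by hand.
+ */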
+
+static const struct ql_stats ql_gstrings_stats[] = {
+ {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
+ {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
+ {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
+ QL_OFF(nic_stats.tx_mcast_pkts)},
+ {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
+ QL_OFF(nic_stats.tx_bcast_pkts)},
+ {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
+ QL_OFF(nic_stats.tx_ucast_pkts)},
+ {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
+ QL_OFF(nic_stats.tx_ctl_pkts)},
+ {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
+ QL_OFF(nic_stats.tx_pause_pkts)},
+ {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
+ QL_OFF(nic_stats.tx_64_pkt)},
+ {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
+ QL_OFF(nic_stats.tx_65_to_127_pkt)},
+ {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
+ QL_OFF(nic_stats.tx_128_to_255_pkt)},
+ {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
+ QL_OFF(nic_stats.tx_256_511_pkt)},
+ {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
+ QL_OFF(nic_stats.tx_512_to_1023_pkt)},
+ {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
+ QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
+ {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
+ QL_OFF(nic_stats.tx_1519_to_max_pkt)},
+ {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
+ QL_OFF(nic_stats.tx_undersize_pkt)},
+ {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
+ QL_OFF(nic_stats.tx_oversize_pkt)},
+ {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
+ {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
+ QL_OFF(nic_stats.rx_bytes_ok)},
+ {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
+ {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
+ QL_OFF(nic_stats.rx_pkts_ok)},
+ {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
+ QL_OFF(nic_stats.rx_bcast_pkts)},
+ {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
+ QL_OFF(nic_stats.rx_mcast_pkts)},
+ {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
+ QL_OFF(nic_stats.rx_ucast_pkts)},
+ {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
+ QL_OFF(nic_stats.rx_undersize_pkts)},
+ {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
+ QL_OFF(nic_stats.rx_oversize_pkts)},
+ {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
+ QL_OFF(nic_stats.rx_jabber_pkts)},
+ {"rx_undersize_fcerr_pkts",
+ QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
+ QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
+ {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
+ QL_OFF(nic_stats.rx_drop_events)},
+ {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
+ QL_OFF(nic_stats.rx_fcerr_pkts)},
+ {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
+ QL_OFF(nic_stats.rx_align_err)},
+ {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
+ QL_OFF(nic_stats.rx_symbol_err)},
+ {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
+ QL_OFF(nic_stats.rx_mac_err)},
+ {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
+ QL_OFF(nic_stats.rx_ctl_pkts)},
+ {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
+ QL_OFF(nic_stats.rx_pause_pkts)},
+ {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
+ QL_OFF(nic_stats.rx_64_pkts)},
+ {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
+ QL_OFF(nic_stats.rx_65_to_127_pkts)},
+ {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
+ QL_OFF(nic_stats.rx_128_255_pkts)},
+ {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
+ QL_OFF(nic_stats.rx_256_511_pkts)},
+ {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
+ QL_OFF(nic_stats.rx_512_to_1023_pkts)},
+ {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
+ QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
+ {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
+ QL_OFF(nic_stats.rx_1519_to_max_pkts)},
+ {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
+ QL_OFF(nic_stats.rx_len_err_pkts)},
+ {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
+ QL_OFF(nic_stats.rx_code_err)},
+ {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
+ QL_OFF(nic_stats.rx_oversize_err)},
+ {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
+ QL_OFF(nic_stats.rx_undersize_err)},
+ {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
+ QL_OFF(nic_stats.rx_preamble_err)},
+ {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
+ QL_OFF(nic_stats.rx_frame_len_err)},
+ {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
+ QL_OFF(nic_stats.rx_crc_err)},
+ {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
+ QL_OFF(nic_stats.rx_err_count)},
+ {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
+ {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
+ {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
+ {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
+ {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
+ {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
+ {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
+ {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
+ {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
+ {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
+ {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
+ {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
+ {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
+ {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
+ {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
+ {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
+ {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
+ QL_OFF(nic_stats.rx_nic_fifo_drop)},
+};
+
+static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS 7
+
+static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+ struct rx_ring *rx_ring;
+ struct cqicb *cqicb;
+
+ if (!netif_running(qdev->ndev))
+ return status;
+
+ /* Skip the default queue, and update the outbound handler
+ * queues if they changed.
+ */
+ cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
+ if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
+ le16_to_cpu(cqicb->pkt_delay) !=
+ qdev->tx_max_coalesced_frames) {
+ for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ cqicb = (struct cqicb *)rx_ring;
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay =
+ cpu_to_le16(qdev->tx_max_coalesced_frames);
+ cqicb->flags = FLAGS_LI;
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+ CFG_LCQ, rx_ring->cq_id);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
+ goto exit;
+ }
+ }
+ }
+
+ /* Update the inbound (RSS) handler queues if they changed. */
+ cqicb = (struct cqicb *)&qdev->rx_ring[0];
+ if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
+ le16_to_cpu(cqicb->pkt_delay) !=
+ qdev->rx_max_coalesced_frames) {
+		for (i = 0; i < qdev->rss_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ cqicb = (struct cqicb *)rx_ring;
+ cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+ cqicb->pkt_delay =
+ cpu_to_le16(qdev->rx_max_coalesced_frames);
+ cqicb->flags = FLAGS_LI;
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+ CFG_LCQ, rx_ring->cq_id);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
+ goto exit;
+ }
+ }
+ }
+exit:
+ return status;
+}
+
+static void ql_update_stats(struct ql_adapter *qdev)
+{
+ u32 i;
+ u64 data;
+ u64 *iter = &qdev->nic_stats.tx_pkts;
+
+ spin_lock(&qdev->stats_lock);
+ if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
+ goto quit;
+ }
+ /*
+ * Get TX statistics.
+ */
+ for (i = 0x200; i < 0x280; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get RX statistics.
+ */
+ for (i = 0x300; i < 0x3d0; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+	/* Skip over the receive MAC error statistics; they are
+	 * filled in by ql_categorize_rx_err() on the rx path.
+	 */
+ iter += QLGE_RCV_MAC_ERR_STATS;
+
+ /*
+ * Get Per-priority TX pause frame counter statistics.
+ */
+ for (i = 0x500; i < 0x540; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get Per-priority RX pause frame counter statistics.
+ */
+ for (i = 0x568; i < 0x5a8; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get RX NIC FIFO DROP statistics.
+ */
+ if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Error reading status register 0x5b8.\n");
+ goto end;
+ } else
+ *iter = data;
+end:
+ ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+ spin_unlock(&qdev->stats_lock);
+
+ QL_DUMP_STAT(qdev);
+}
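
The walk above depends on struct nic_stats laying out its u64 counters in exactly the order the XGMAC register blocks are read (0x200, 0x208, ...), so one pointer (`iter`) can be advanced across consecutive fields. A minimal userspace sketch of that pattern; the demo_* names are illustrative stand-ins, not driver symbols, and the walk relies on the fields being laid out contiguously just as the driver does:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct nic_stats: u64 counters in register order. */
    struct demo_stats {
            uint64_t tx_pkts;
            uint64_t tx_bytes;
            uint64_t tx_mcast_pkts;
    };

    /* Pretend register read: returns the register offset as the data
     * so the walk is visible in the output. */
    static int demo_read_reg64(uint32_t reg, uint64_t *data)
    {
            *data = reg;
            return 0;
    }

    int main(void)
    {
            struct demo_stats stats;
            uint64_t *iter = &stats.tx_pkts;
            uint32_t reg;

            for (reg = 0x200; reg < 0x218; reg += 8, iter++)
                    if (demo_read_reg64(reg, iter))
                            return 1;
            printf("tx_bytes slot filled from reg 0x%llx\n",
                   (unsigned long long)stats.tx_bytes);
            return 0;
    }
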
+
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ int index;
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < QLGE_STATS_LEN; index++) {
+ memcpy(buf + index * ETH_GSTRING_LEN,
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static int ql_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLGE_TEST_LEN;
+ case ETH_SS_STATS:
+ return QLGE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int index, length;
+
+ length = QLGE_STATS_LEN;
+ ql_update_stats(qdev);
+
+ for (index = 0; index < length; index++) {
+ char *p = (char *)qdev +
+ ql_gstrings_stats[index].stat_offset;
+ *data++ = (ql_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+ }
+}
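
ql_get_ethtool_stats() resolves each counter by adding the table's stat_offset to the adapter base address and dispatching on sizeof_stat, which is what lets one loop serve every entry of ql_gstrings_stats. A compilable sketch of the same offsetof-table idiom, with invented demo_* names in place of the driver's types:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_adapter {           /* stand-in for struct ql_adapter */
            uint64_t tx_pkts;
            uint32_t small_counter;
    };

    struct demo_stat {
            char name[32];
            size_t size;            /* sizeof the field */
            size_t offset;          /* offsetof within the adapter */
    };

    static const struct demo_stat demo_stats[] = {
            { "tx_pkts", sizeof(uint64_t),
              offsetof(struct demo_adapter, tx_pkts) },
            { "small_counter", sizeof(uint32_t),
              offsetof(struct demo_adapter, small_counter) },
    };

    int main(void)
    {
            struct demo_adapter a = { .tx_pkts = 42, .small_counter = 7 };
            size_t i;

            for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
                    const char *p = (const char *)&a + demo_stats[i].offset;
                    uint64_t v = demo_stats[i].size == sizeof(uint64_t) ?
                            *(const uint64_t *)p : *(const uint32_t *)p;

                    printf("%s = %llu\n", demo_stats[i].name,
                           (unsigned long long)v);
            }
            return 0;
    }
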
+
+static int ql_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
+ STS_LINK_TYPE_10GBASET) {
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ ecmd->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, qlge_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "v%d.%d.%d",
+ (qdev->fw_rev_id & 0x00ff0000) >> 16,
+ (qdev->fw_rev_id & 0x0000ff00) >> 8,
+ (qdev->fw_rev_id & 0x000000ff));
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+ else
+ drvinfo->regdump_len = sizeof(struct ql_reg_dump);
+ drvinfo->eedump_len = 0;
+}
+
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = qdev->wol;
+ }
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ netif_info(qdev, drv, qdev->ndev,
+ "WOL is only supported for mezz card\n");
+ return -EOPNOTSUPP;
+ }
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+ qdev->wol = wol->wolopts;
+
+ netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
+ return 0;
+}
+
+static int ql_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current LED settings */
+ if (ql_mb_get_led_cfg(qdev))
+ return -EIO;
+
+ /* Start blinking */
+ ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+ return 0;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ if (ql_mb_set_led_cfg(qdev, qdev->led_config))
+ return -EIO;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ql_start_loopback(struct ql_adapter *qdev)
+{
+ if (netif_carrier_ok(qdev->ndev)) {
+ set_bit(QL_LB_LINK_UP, &qdev->flags);
+ netif_carrier_off(qdev->ndev);
+ } else
+ clear_bit(QL_LB_LINK_UP, &qdev->flags);
+ qdev->link_config |= CFG_LOOPBACK_PCS;
+ return ql_mb_set_port_cfg(qdev);
+}
+
+static void ql_stop_loopback(struct ql_adapter *qdev)
+{
+ qdev->link_config &= ~CFG_LOOPBACK_PCS;
+ ql_mb_set_port_cfg(qdev);
+ if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
+ netif_carrier_on(qdev->ndev);
+ clear_bit(QL_LB_LINK_UP, &qdev->flags);
+ }
+}
+
+static void ql_create_lb_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+void ql_check_lb_frame(struct ql_adapter *qdev,
+ struct sk_buff *skb)
+{
+ unsigned int frame_size = skb->len;
+
+ if ((*(skb->data + 3) == 0xFF) &&
+ (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ atomic_dec(&qdev->lb_count);
+ return;
+ }
+}
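
The build/verify pair can be exercised outside the driver. This is a userspace sketch of the same byte pattern (all 0xFF in the first half, 0xAA in the second half with two marker bytes), not the driver functions themselves:

    #include <stdio.h>
    #include <string.h>

    static void create_lb_frame(unsigned char *data, unsigned int frame_size)
    {
            memset(data, 0xFF, frame_size);
            frame_size &= ~1;
            memset(&data[frame_size / 2], 0xAA, frame_size / 2 - 1);
            data[frame_size / 2 + 10] = 0xBE;
            data[frame_size / 2 + 12] = 0xAF;
    }

    static int check_lb_frame(const unsigned char *data, unsigned int frame_size)
    {
            return data[3] == 0xFF &&
                   data[frame_size / 2 + 10] == 0xBE &&
                   data[frame_size / 2 + 12] == 0xAF;
    }

    int main(void)
    {
            unsigned char frame[512];

            create_lb_frame(frame, sizeof(frame));
            printf("loopback frame %s\n",
                   check_lb_frame(frame, sizeof(frame)) ? "intact" : "corrupt");
            return 0;
    }
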
+
+static int ql_run_loopback_test(struct ql_adapter *qdev)
+{
+ int i;
+ netdev_tx_t rc;
+ struct sk_buff *skb;
+ unsigned int size = SMALL_BUF_MAP_SIZE;
+
+ for (i = 0; i < 64; i++) {
+ skb = netdev_alloc_skb(qdev->ndev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->queue_mapping = 0;
+ skb_put(skb, size);
+ ql_create_lb_frame(skb, size);
+ rc = ql_lb_send(skb, qdev->ndev);
+ if (rc != NETDEV_TX_OK)
+ return -EPIPE;
+ atomic_inc(&qdev->lb_count);
+ }
+ /* Give queue time to settle before testing results. */
+ msleep(2);
+ ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
+ return atomic_read(&qdev->lb_count) ? -EIO : 0;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+ *data = ql_start_loopback(qdev);
+ if (*data)
+ goto out;
+ *data = ql_run_loopback_test(qdev);
+out:
+ ql_stop_loopback(qdev);
+ return *data;
+}
+
+static void ql_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
+
+ if (netif_running(ndev)) {
+ set_bit(QL_SELFTEST, &qdev->flags);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+ if (ql_loopback_test(qdev, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ } else {
+ /* Online tests */
+ data[0] = 0;
+ }
+ clear_bit(QL_SELFTEST, &qdev->flags);
+ /* Give link time to come up after
+ * port configuration changes.
+ */
+ msleep_interruptible(4 * 1000);
+ } else {
+ netif_err(qdev, drv, qdev->ndev,
+ "is down, Loopback test will fail.\n");
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ return sizeof(struct ql_mpi_coredump);
+ else
+ return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ ql_get_dump(qdev, p);
+ qdev->core_is_dumped = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ regs->len = sizeof(struct ql_mpi_coredump);
+ else
+ regs->len = sizeof(struct ql_reg_dump);
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ struct ql_adapter *qdev = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+ c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+	/* This chip coalesces as follows:
+	 * If a packet arrives, hold off interrupts until
+	 * cqicb->int_delay expires, but if no other packets arrive don't
+	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
+	 * timer to coalesce on a frame basis. So, we have to take ethtool's
+	 * max_coalesced_frames value and convert it to a delay in microseconds.
+	 * We do this by assuming a baseline throughput of 1,000,000 frames per
+	 * second @ (1024 bytes). This means one frame per usec, so it's a
+	 * simple one-to-one ratio.
+	 */
+ c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+ c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ /* Validate user parameters. */
+ if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+ return -EINVAL;
+ /* Don't wait more than 10 usec. */
+ if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+ return -EINVAL;
+ if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+ return -EINVAL;
+ if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+ return -EINVAL;
+
+ /* Verify a change took place before updating the hardware. */
+ if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
+ qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
+ qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
+ qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
+ return 0;
+
+ qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
+ qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
+ qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
+ qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
+
+ return ql_update_ring_coalescing(qdev);
+}
+
+static void ql_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql_adapter *qdev = netdev_priv(netdev);
+
+ ql_mb_get_port_cfg(qdev);
+ if (qdev->link_config & CFG_PAUSE_STD) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int ql_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql_adapter *qdev = netdev_priv(netdev);
+ int status = 0;
+
+ if ((pause->rx_pause) && (pause->tx_pause))
+ qdev->link_config |= CFG_PAUSE_STD;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ qdev->link_config &= ~CFG_PAUSE_STD;
+ else
+ return -EINVAL;
+
+ status = ql_mb_set_port_cfg(qdev);
+ return status;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ qdev->msg_enable = value;
+}
+
+const struct ethtool_ops qlge_ethtool_ops = {
+ .get_settings = ql_get_settings,
+ .get_drvinfo = ql_get_drvinfo,
+ .get_wol = ql_get_wol,
+ .set_wol = ql_set_wol,
+ .get_regs_len = ql_get_regs_len,
+ .get_regs = ql_get_regs,
+ .get_msglevel = ql_get_msglevel,
+ .set_msglevel = ql_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .set_phys_id = ql_set_phys_id,
+ .self_test = ql_self_test,
+ .get_pauseparam = ql_get_pauseparam,
+ .set_pauseparam = ql_set_pauseparam,
+ .get_coalesce = ql_get_coalesce,
+ .set_coalesce = ql_set_coalesce,
+ .get_sset_count = ql_get_sset_count,
+ .get_strings = ql_get_strings,
+ .get_ethtool_stats = ql_get_ethtool_stats,
+};
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c
new file mode 100644
index 000000000..25800a1de
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -0,0 +1,5016 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author: Linux qlge network device driver by
+ * Ron Mercer <ron.mercer@qlogic.com>
+ */
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/prefetch.h>
+#include <net/ip6_checksum.h>
+
+#include "qlge.h"
+
+char qlge_driver_name[] = DRV_NAME;
+const char qlge_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
+MODULE_DESCRIPTION(DRV_STRING " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg =
+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+/* NETIF_MSG_TIMER | */
+ NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR |
+/* NETIF_MSG_TX_QUEUED | */
+/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
+/* NETIF_MSG_PKTDATA | */
+ NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+#define MSIX_IRQ 0
+#define MSI_IRQ 1
+#define LEG_IRQ 2
+static int qlge_irq_type = MSIX_IRQ;
+module_param(qlge_irq_type, int, 0664);
+MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+ "Option to enable MPI firmware dump. "
+ "Default is OFF - Do Not allocate memory. ");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+ "Option to allow force of firmware core dump. "
+ "Default is OFF - Do not allow.");
+
+static const struct pci_device_id qlge_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
+
+/* This hardware semaphore causes exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCOE firmware and the FC driver.
+ */
+static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+{
+ u32 sem_bits = 0;
+
+ switch (sem_mask) {
+ case SEM_XGMAC0_MASK:
+ sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+ break;
+ case SEM_XGMAC1_MASK:
+ sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+ break;
+ case SEM_ICB_MASK:
+ sem_bits = SEM_SET << SEM_ICB_SHIFT;
+ break;
+ case SEM_MAC_ADDR_MASK:
+ sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+ break;
+ case SEM_FLASH_MASK:
+ sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+ break;
+ case SEM_PROBE_MASK:
+ sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+ break;
+ case SEM_RT_IDX_MASK:
+ sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+ break;
+ case SEM_PROC_REG_MASK:
+ sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+ break;
+ default:
+		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
+ return -EINVAL;
+ }
+
+ ql_write32(qdev, SEM, sem_bits | sem_mask);
+ return !(ql_read32(qdev, SEM) & sem_bits);
+}
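
ql_sem_trylock() never spins in hardware terms: it writes the ownership bits together with their mask and learns from the read-back whether the grant happened. A toy model of that write-then-read-back handshake; the grant rule in sem_write() is a simplification standing in for the real SEM semantics:

    #include <stdint.h>
    #include <stdio.h>

    /* sem_reg stands in for the chip's SEM register. */
    static uint32_t sem_reg;

    static void sem_write(uint32_t val, uint32_t mask)
    {
            if (!(sem_reg & mask))          /* grant only if free */
                    sem_reg |= (val & mask);
    }

    /* Returns 0 on success, like ql_sem_trylock(). */
    static int toy_sem_trylock(uint32_t sem_mask, uint32_t sem_bits)
    {
            sem_write(sem_bits, sem_mask);
            return !(sem_reg & sem_bits);   /* did our bits stick? */
    }

    int main(void)
    {
            uint32_t mask = 0x3;

            printf("A: %s\n", toy_sem_trylock(mask, 0x1) ? "busy" : "got it");
            printf("B: %s\n", toy_sem_trylock(mask, 0x2) ? "busy" : "got it");
            sem_reg &= ~mask;       /* A releases, as ql_sem_unlock() does
                                     * by writing the bare mask */
            printf("B: %s\n", toy_sem_trylock(mask, 0x2) ? "busy" : "got it");
            return 0;
    }
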
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+ unsigned int wait_count = 30;
+ do {
+ if (!ql_sem_trylock(qdev, sem_mask))
+ return 0;
+ udelay(100);
+ } while (--wait_count);
+ return -ETIMEDOUT;
+}
+
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+ ql_write32(qdev, SEM, sem_mask);
+ ql_read32(qdev, SEM); /* flush */
+}
+
+/* This function waits for a specific bit to come ready
+ * in a given register. It is used mostly by the initialize
+ * process, but is also used in kernel thread API such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = UDELAY_COUNT;
+
+ while (count) {
+ temp = ql_read32(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit) {
+ netif_alert(qdev, probe, qdev->ndev,
+				    "register 0x%.08x access error, value = 0x%.08x!\n",
+ reg, temp);
+ return -EIO;
+ } else if (temp & bit)
+ return 0;
+ udelay(UDELAY_DELAY);
+ count--;
+ }
+ netif_alert(qdev, probe, qdev->ndev,
+ "Timed out waiting for reg %x to come ready.\n", reg);
+ return -ETIMEDOUT;
+}
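
ql_wait_reg_rdy() is the driver's one polling primitive: bounded polls, early exit on an error bit, success on the ready bit. The same shape in standalone C, with a fake register that becomes ready on the third poll:

    #include <stdio.h>

    static unsigned int polls;

    static unsigned int reg_read(void)
    {
            return ++polls >= 3 ? 0x1 : 0x0;        /* bit 0 = ready */
    }

    static int wait_reg_rdy(unsigned int ready_bit, unsigned int err_bit,
                            int max_polls)
    {
            while (max_polls--) {
                    unsigned int val = reg_read();

                    if (val & err_bit)
                            return -1;      /* -EIO in the driver */
                    if (val & ready_bit)
                            return 0;
                    /* the driver does udelay(UDELAY_DELAY) here */
            }
            return -2;                      /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
            int rc = wait_reg_rdy(0x1, 0x2, 10);

            printf("wait: %d after %u polls\n", rc, polls);
            return 0;
    }
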
+
+/* The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+{
+ int count = UDELAY_COUNT;
+ u32 temp;
+
+ while (count) {
+ temp = ql_read32(qdev, CFG);
+ if (temp & CFG_LE)
+ return -EIO;
+ if (!(temp & bit))
+ return 0;
+ udelay(UDELAY_DELAY);
+ count--;
+ }
+ return -ETIMEDOUT;
+}
+
+/* Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id)
+{
+ u64 map;
+ int status = 0;
+ int direction;
+ u32 mask;
+ u32 value;
+
+ direction =
+ (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE;
+
+ map = pci_map_single(qdev->pdev, ptr, size, direction);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
+ return -ENOMEM;
+ }
+
+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		goto lock_failed;	/* must still unmap the DMA area */
+
+ status = ql_wait_cfg(qdev, bit);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for CFG to come ready.\n");
+ goto exit;
+ }
+
+ ql_write32(qdev, ICB_L, (u32) map);
+ ql_write32(qdev, ICB_H, (u32) (map >> 32));
+
+ mask = CFG_Q_MASK | (bit << 16);
+ value = bit | (q_id << CFG_Q_SHIFT);
+ ql_write32(qdev, CFG, (mask | value));
+
+ /*
+ * Wait for the bit to clear after signaling hw.
+ */
+ status = ql_wait_cfg(qdev, bit);
+exit:
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
+lock_failed:
+	pci_unmap_single(qdev->pdev, map, size, direction);
+	return status;
+}
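
The `mask = CFG_Q_MASK | (bit << 16)` dance works because this register family appears to treat the upper 16 bits of a write as a per-bit write-enable mask: the same `(x << 16) | x` pattern shows up on STS and INTR_EN elsewhere in this file. A sketch of that update rule, assuming exactly the mask-in-upper-half convention the code above suggests:

    #include <stdint.h>
    #include <stdio.h>

    /* Apply a 32-bit write (mask in the high half, value in the low
     * half) to a 16-bit register: only masked bits change. */
    static uint16_t masked_write(uint16_t reg, uint32_t wrval)
    {
            uint16_t mask = wrval >> 16;
            uint16_t value = wrval & 0xffff;

            return (reg & ~mask) | (value & mask);
    }

    int main(void)
    {
            uint16_t cfg = 0x00f0;

            /* set bit 0 without disturbing anything else, the way
             * ql_write32(qdev, STS, (port_init << 16) | port_init)
             * does for its bit */
            cfg = masked_write(cfg, (0x0001u << 16) | 0x0001u);
            printf("cfg = 0x%04x\n", (unsigned)cfg);    /* 0x00f1 */
            return 0;
    }
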
+
+/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value)
+{
+ u32 offset = 0;
+ int status;
+
+ switch (type) {
+ case MAC_ADDR_TYPE_MULTI_MAC:
+ case MAC_ADDR_TYPE_CAM_MAC:
+ {
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ goto exit;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ goto exit;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ if (type == MAC_ADDR_TYPE_CAM_MAC) {
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+ status =
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MR, 0);
+ if (status)
+ goto exit;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ }
+ break;
+ }
+ case MAC_ADDR_TYPE_VLAN:
+ case MAC_ADDR_TYPE_MULTI_FLTR:
+ default:
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
+ status = -EPERM;
+ }
+exit:
+ return status;
+}
+
+/* Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+ u16 index)
+{
+ u32 offset = 0;
+ int status = 0;
+
+ switch (type) {
+ case MAC_ADDR_TYPE_MULTI_MAC:
+ {
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+ (addr[4] << 8) | (addr[5]);
+
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) |
+ type | MAC_ADDR_E);
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) |
+ type | MAC_ADDR_E);
+
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ break;
+ }
+ case MAC_ADDR_TYPE_CAM_MAC:
+ {
+ u32 cam_output;
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower =
+ (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ (addr[5]);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+			/* This field should also include the queue id
+			 * and possibly the function id. Right now we
+			 * hardcode the route field to NIC core.
+			 */
+			cam_output = (CAM_OUT_ROUTE_NIC |
+				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
+				      (0 << CAM_OUT_CQ_ID_SHIFT));
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ cam_output |= CAM_OUT_RV;
+ /* route to NIC core */
+ ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ break;
+ }
+ case MAC_ADDR_TYPE_VLAN:
+ {
+ u32 enable_bit = *((u32 *) &addr[0]);
+ /* For VLAN, the addr actually holds a bit that
+ * either enables or disables the vlan id we are
+ * addressing. It's either MAC_ADDR_E on or off.
+ * That's bit-27 we're talking about.
+ */
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type | /* type */
+ enable_bit); /* enable/disable */
+ break;
+ }
+ case MAC_ADDR_TYPE_MULTI_FLTR:
+ default:
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
+ status = -EPERM;
+ }
+exit:
+ return status;
+}
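
The CAM writes above split the 6-byte station address across two 32-bit data-register writes: bytes 0-1 form the "upper" word and bytes 2-5 the "lower" word, matching the upper/lower variables in ql_set_mac_addr_reg(). A standalone check of that packing, with an arbitrary example address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t addr[6] = { 0x00, 0xc0, 0xdd, 0x12, 0x34, 0x56 };
            uint32_t upper = (addr[0] << 8) | addr[1];
            uint32_t lower = ((uint32_t)addr[2] << 24) | (addr[3] << 16) |
                             (addr[4] << 8) | addr[5];

            printf("upper = 0x%08x, lower = 0x%08x\n", upper, lower);
            return 0;
    }
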
+
+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+ int status;
+ char zero_mac_addr[ETH_ALEN];
+ char *addr;
+
+ if (set) {
+ addr = &qdev->current_mac_addr[0];
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Set Mac addr %pM\n", addr);
+ } else {
+ eth_zero_addr(zero_mac_addr);
+ addr = &zero_mac_addr[0];
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Clearing MAC address\n");
+ }
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init mac address.\n");
+ return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+ netif_err(qdev, link, qdev->ndev, "Link is up.\n");
+ netif_carrier_on(qdev->ndev);
+ ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+ netif_err(qdev, link, qdev->ndev, "Link is down.\n");
+ netif_carrier_off(qdev->ndev);
+ ql_set_mac_addr(qdev, 0);
+}
+
+/* Get a specific frame routing value from the CAM.
+ * Used for debug and reg dump.
+ */
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+{
+ int status = 0;
+
+ status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+ if (status)
+ goto exit;
+
+ ql_write32(qdev, RT_IDX,
+ RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+ status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
+ if (status)
+ goto exit;
+ *value = ql_read32(qdev, RT_DATA);
+exit:
+ return status;
+}
+
+/* The NIC function for this chip has 16 routing indexes. Each one can be used
+ * to route different frame types to various inbound queues. We send broadcast/
+ * multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
+static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
+ int enable)
+{
+ int status = -EINVAL; /* Return error if no mask match. */
+ u32 value = 0;
+
+ switch (mask) {
+ case RT_IDX_CAM_HIT:
+ {
+ value = RT_IDX_DST_CAM_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_VALID: /* Promiscuous Mode frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_IP_CSUM_ERR_SLOT <<
+ RT_IDX_IDX_SHIFT); /* index */
+ break;
+ }
+ case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
+ RT_IDX_IDX_SHIFT); /* index */
+ break;
+ }
+ case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_MCAST: /* Pass up All Multicast frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
+ {
+ value = RT_IDX_DST_RSS | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case 0: /* Clear the E-bit on an entry. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (index << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ default:
+ netif_err(qdev, ifup, qdev->ndev,
+ "Mask type %d not yet supported.\n", mask);
+ status = -EPERM;
+ goto exit;
+ }
+
+ if (value) {
+ status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+ if (status)
+ goto exit;
+ value |= (enable ? RT_IDX_E : 0);
+ ql_write32(qdev, RT_IDX, value);
+ ql_write32(qdev, RT_DATA, enable ? mask : 0);
+ }
+exit:
+ return status;
+}
+
+static void ql_enable_interrupts(struct ql_adapter *qdev)
+{
+ ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+}
+
+static void ql_disable_interrupts(struct ql_adapter *qdev)
+{
+ ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+}
+
+/* If we're running with multiple MSI-X vectors then we enable on the fly.
+ * Otherwise, we may have multiple outstanding workers and don't want to
+ * enable until the last one finishes. In this case, the irq_cnt gets
+ * incremented every time we queue a worker and decremented every time
+ * a worker finishes. Once it hits zero we enable the interrupt.
+ */
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+ u32 var = 0;
+ unsigned long hw_flags = 0;
+ struct intr_context *ctx = qdev->intr_context + intr;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+ /* Always enable if we're MSIX multi interrupts and
+ * it's not the default (zeroeth) interrupt.
+ */
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_en_mask);
+ var = ql_read32(qdev, STS);
+ return var;
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (atomic_dec_and_test(&ctx->irq_cnt)) {
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_en_mask);
+ var = ql_read32(qdev, STS);
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return var;
+}
+
+static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+ u32 var = 0;
+ struct intr_context *ctx;
+
+ /* HW disables for us if we're MSIX multi interrupts and
+ * it's not the default (zeroeth) interrupt.
+ */
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+ return 0;
+
+ ctx = qdev->intr_context + intr;
+ spin_lock(&qdev->hw_lock);
+ if (!atomic_read(&ctx->irq_cnt)) {
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_dis_mask);
+ var = ql_read32(qdev, STS);
+ }
+ atomic_inc(&ctx->irq_cnt);
+ spin_unlock(&qdev->hw_lock);
+ return var;
+}
+
+static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+{
+ int i;
+ for (i = 0; i < qdev->intr_count; i++) {
+		/* The enable call does an atomic_dec_and_test
+ * and enables only if the result is zero.
+ * So we precharge it here.
+ */
+ if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+ i == 0))
+ atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+ ql_enable_completion_interrupt(qdev, i);
+ }
+}
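
The irq_cnt bookkeeping reads more easily outside the locking: a disable masks the interrupt on the 0 -> 1 transition, and an enable unmasks only on the 1 -> 0 transition, so nested disablers are safe. A userspace model of just that counting, with a plain int standing in for the driver's atomic and hw_lock:

    #include <stdio.h>

    static int irq_cnt;
    static int enabled = 1;

    static void disable_completion_irq(void)
    {
            if (irq_cnt++ == 0)
                    enabled = 0;            /* first disabler masks */
    }

    static void enable_completion_irq(void)
    {
            if (--irq_cnt == 0)
                    enabled = 1;            /* last enabler unmasks */
    }

    int main(void)
    {
            disable_completion_irq();       /* worker 1 queued */
            disable_completion_irq();       /* worker 2 queued */
            enable_completion_irq();        /* worker 1 done: still masked */
            printf("after one enable: %s\n", enabled ? "enabled" : "masked");
            enable_completion_irq();        /* worker 2 done: unmasked */
            printf("after both:      %s\n", enabled ? "enabled" : "masked");
            return 0;
    }
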
+
+static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+{
+ int status, i;
+ u16 csum = 0;
+ __le16 *flash = (__le16 *)&qdev->flash;
+
+ status = strncmp((char *)&qdev->flash, str, 4);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
+ return status;
+ }
+
+ for (i = 0; i < size; i++)
+ csum += le16_to_cpu(*flash++);
+
+ if (csum)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+
+ return csum;
+}
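
Validation passes when the 16-bit words of the image sum to zero modulo 2^16 (the words live on flash as little-endian __le16, hence the le16_to_cpu above). A sketch of how such an image would be sealed and re-checked, with made-up contents rather than a real flash layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t flash[8] = { 0x3830, 0x3132, 0xdead, 0xbeef,
                                  0x1234, 0x5678, 0x0001, 0x0000 };
            uint16_t csum = 0;
            int i;

            for (i = 0; i < 7; i++)
                    csum += flash[i];
            flash[7] = (uint16_t)-csum;     /* seal the image */

            for (csum = 0, i = 0; i < 8; i++)
                    csum += flash[i];
            printf("checksum = 0x%04x (%s)\n", (unsigned)csum,
                   csum ? "bad" : "good");
            return 0;
    }
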
+
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* set up for reg read */
+ ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* This data is stored on flash as an array of
+ * __le32. Since ql_read32() returns cpu endian
+ * we need to swap it back.
+ */
+ *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
+exit:
+ return status;
+}
+
+static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+{
+ u32 i, size;
+ int status;
+ __le32 *p = (__le32 *)&qdev->flash;
+ u32 offset;
+ u8 mac_addr[6];
+
+ /* Get flash offset for function and adjust
+ * for dword access.
+ */
+ if (!qdev->port)
+ offset = FUNC0_FLASH_OFFSET / sizeof(u32);
+ else
+ offset = FUNC1_FLASH_OFFSET / sizeof(u32);
+
+ if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ return -ETIMEDOUT;
+
+ size = sizeof(struct flash_params_8000) / sizeof(u32);
+ for (i = 0; i < size; i++, p++) {
+ status = ql_read_flash_word(qdev, i+offset, p);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
+ goto exit;
+ }
+ }
+
+ status = ql_validate_flash(qdev,
+ sizeof(struct flash_params_8000) / sizeof(u16),
+ "8000");
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Extract either manufacturer or BOFM modified
+ * MAC address.
+ */
+ if (qdev->flash.flash_params_8000.data_type1 == 2)
+ memcpy(mac_addr,
+ qdev->flash.flash_params_8000.mac_addr1,
+ qdev->ndev->addr_len);
+ else
+ memcpy(mac_addr,
+ qdev->flash.flash_params_8000.mac_addr,
+ qdev->ndev->addr_len);
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(qdev->ndev->dev_addr,
+ mac_addr,
+ qdev->ndev->addr_len);
+
+exit:
+ ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+{
+ int i;
+ int status;
+ __le32 *p = (__le32 *)&qdev->flash;
+ u32 offset = 0;
+ u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+ /* Second function's parameters follow the first
+ * function's.
+ */
+ if (qdev->port)
+ offset = size;
+
+ if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ return -ETIMEDOUT;
+
+ for (i = 0; i < size; i++, p++) {
+ status = ql_read_flash_word(qdev, i+offset, p);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
+ goto exit;
+ }
+	}
+
+ status = ql_validate_flash(qdev,
+ sizeof(struct flash_params_8012) / sizeof(u16),
+ "8012");
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(qdev->ndev->dev_addr,
+ qdev->flash.flash_params_8012.mac_addr,
+ qdev->ndev->addr_len);
+
+exit:
+ ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+ int status;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ return status;
+ /* write the data to the data reg */
+ ql_write32(qdev, XGMAC_DATA, data);
+ /* trigger the write */
+ ql_write32(qdev, XGMAC_ADDR, reg);
+ return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+ /* set up for reg read */
+ ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+ /* get the data */
+ *data = ql_read32(qdev, XGMAC_DATA);
+exit:
+ return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+ int status = 0;
+ u32 hi = 0;
+ u32 lo = 0;
+
+ status = ql_read_xgmac_reg(qdev, reg, &lo);
+ if (status)
+ goto exit;
+
+ status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+ if (status)
+ goto exit;
+
+ *data = (u64) lo | ((u64) hi << 32);
+
+exit:
+ return status;
+}
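
Each 64-bit counter is two adjacent 32-bit registers spliced together. The splice itself is trivial; a driver that needed exact values would also have to consider a carry from lo into hi between the two reads, which the code above does not re-check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lo = 0x89abcdef, hi = 0x01234567;
            uint64_t data = (uint64_t)lo | ((uint64_t)hi << 32);

            printf("data = 0x%016llx\n", (unsigned long long)data);
            return 0;
    }
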
+
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+ int status;
+ /*
+ * Get MPI firmware version for driver banner
+	 * and ethtool info.
+ */
+ status = ql_mb_about_fw(qdev);
+ if (status)
+ goto exit;
+ status = ql_mb_get_fw_state(qdev);
+ if (status)
+ goto exit;
+ /* Wake up a worker to get/set the TX/RX frame sizes. */
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+ return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 data;
+
+ if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+ /* Another function has the semaphore, so
+ * wait for the port init bit to come ready.
+ */
+ netif_info(qdev, link, qdev->ndev,
+ "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+ status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+ if (status) {
+ netif_crit(qdev, link, qdev->ndev,
+ "Port initialize timed out.\n");
+ }
+ return status;
+ }
+
+	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
+ /* Set the core reset. */
+ status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+ if (status)
+ goto end;
+ data |= GLOBAL_CFG_RESET;
+ status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ if (status)
+ goto end;
+
+ /* Clear the core reset and turn on jumbo for receiver. */
+ data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
+ data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
+ data |= GLOBAL_CFG_TX_STAT_EN;
+ data |= GLOBAL_CFG_RX_STAT_EN;
+ status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ if (status)
+ goto end;
+
+	/* Enable the transmitter and clear its reset. */
+ status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+ if (status)
+ goto end;
+ data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
+ data |= TX_CFG_EN; /* Enable the transmitter. */
+ status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+ if (status)
+ goto end;
+
+	/* Enable the receiver and clear its reset. */
+ status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+ if (status)
+ goto end;
+ data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
+ data |= RX_CFG_EN; /* Enable the receiver. */
+ status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+ if (status)
+ goto end;
+
+ /* Turn on jumbo. */
+ status =
+ ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+ if (status)
+ goto end;
+ status =
+ ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+ if (status)
+ goto end;
+
+ /* Signal to the world that the port is enabled. */
+ ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+ ql_sem_unlock(qdev, qdev->xg_sem_mask);
+ return status;
+}
+
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+ return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
+/* Get the next large buffer. */
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+ rx_ring->lbq_curr_idx++;
+ if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+ rx_ring->lbq_curr_idx = 0;
+ rx_ring->lbq_free_cnt++;
+ return lbq_desc;
+}
+
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr(lbq_desc, mapaddr),
+ rx_ring->lbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+
+ /* If it's the last chunk of our master page then
+ * we unmap it.
+ */
+ if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+ == ql_lbq_block_size(qdev))
+ pci_unmap_page(qdev->pdev,
+ lbq_desc->p.pg_chunk.map,
+ ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ return lbq_desc;
+}
+
+/* Get the next small buffer. */
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
+ rx_ring->sbq_curr_idx++;
+ if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
+ rx_ring->sbq_curr_idx = 0;
+ rx_ring->sbq_free_cnt++;
+ return sbq_desc;
+}
+
+/* Update an rx ring index. */
+static void ql_update_cq(struct rx_ring *rx_ring)
+{
+ rx_ring->cnsmr_idx++;
+ rx_ring->curr_entry++;
+ if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
+ rx_ring->cnsmr_idx = 0;
+ rx_ring->curr_entry = rx_ring->cq_base;
+ }
+}
+
+static void ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+ ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+}
+
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+ struct bq_desc *lbq_desc)
+{
+ if (!rx_ring->pg_chunk.page) {
+ u64 map;
+ rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ GFP_ATOMIC,
+ qdev->lbq_buf_order);
+ if (unlikely(!rx_ring->pg_chunk.page)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "page allocation failed.\n");
+ return -ENOMEM;
+ }
+ rx_ring->pg_chunk.offset = 0;
+ map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+ 0, ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ __free_pages(rx_ring->pg_chunk.page,
+ qdev->lbq_buf_order);
+ rx_ring->pg_chunk.page = NULL;
+ netif_err(qdev, drv, qdev->ndev,
+ "PCI mapping failed.\n");
+ return -ENOMEM;
+ }
+ rx_ring->pg_chunk.map = map;
+ rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+ }
+
+ /* Copy the current master pg_chunk info
+ * to the current descriptor.
+ */
+ lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+ /* Adjust the master page chunk for next
+ * buffer get.
+ */
+ rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+ if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+ rx_ring->pg_chunk.page = NULL;
+ lbq_desc->p.pg_chunk.last_flag = 1;
+ } else {
+ rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+ get_page(rx_ring->pg_chunk.page);
+ lbq_desc->p.pg_chunk.last_flag = 0;
+ }
+ return 0;
+}
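
The chunk accounting above carves one high-order "master" page of size PAGE_SIZE << lbq_buf_order into lbq_buf_size pieces by advancing an offset, taking a page reference per chunk and retiring the page once the offset wraps to the block size. The arithmetic, with illustrative sizes rather than the driver's:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int page_size = 4096, order = 1;
            const unsigned int block = page_size << order;  /* 8192 */
            const unsigned int buf_size = 2048;
            unsigned int offset;

            for (offset = 0; offset < block; offset += buf_size) {
                    int last = (offset + buf_size) == block;

                    printf("chunk at offset %5u%s\n", offset,
                           last ? " (last: unmap page on completion)" : "");
            }
            return 0;
    }
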
+/* Process (refill) a large buffer queue. */
+static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ u32 clean_idx = rx_ring->lbq_clean_idx;
+ u32 start_idx = clean_idx;
+ struct bq_desc *lbq_desc;
+ u64 map;
+ int i;
+
+ while (rx_ring->lbq_free_cnt > 32) {
+ for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
+ lbq_desc = &rx_ring->lbq[clean_idx];
+ if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+ rx_ring->lbq_clean_idx = clean_idx;
+ netif_err(qdev, ifup, qdev->ndev,
+ "Could not get a page chunk, i=%d, clean_idx =%d .\n",
+ i, clean_idx);
+ return;
+ }
+
+ map = lbq_desc->p.pg_chunk.map +
+ lbq_desc->p.pg_chunk.offset;
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_len_set(lbq_desc, maplen,
+ rx_ring->lbq_buf_size);
+ *lbq_desc->addr = cpu_to_le64(map);
+
+ pci_dma_sync_single_for_device(qdev->pdev, map,
+ rx_ring->lbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+ clean_idx++;
+ if (clean_idx == rx_ring->lbq_len)
+ clean_idx = 0;
+ }
+
+ rx_ring->lbq_clean_idx = clean_idx;
+ rx_ring->lbq_prod_idx += 16;
+ if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
+ rx_ring->lbq_prod_idx = 0;
+ rx_ring->lbq_free_cnt -= 16;
+ }
+
+ if (start_idx != clean_idx) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx);
+ ql_write_db_reg(rx_ring->lbq_prod_idx,
+ rx_ring->lbq_prod_idx_db_reg);
+ }
+}
+
+/* Process (refill) a small buffer queue. */
+static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ u32 clean_idx = rx_ring->sbq_clean_idx;
+ u32 start_idx = clean_idx;
+ struct bq_desc *sbq_desc;
+ u64 map;
+ int i;
+
+ while (rx_ring->sbq_free_cnt > 16) {
+ for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
+ sbq_desc = &rx_ring->sbq[clean_idx];
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
+ if (sbq_desc->p.skb == NULL) {
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "sbq: getting new skb for index %d.\n",
+ sbq_desc->index);
+ sbq_desc->p.skb =
+ netdev_alloc_skb(qdev->ndev,
+ SMALL_BUFFER_SIZE);
+ if (sbq_desc->p.skb == NULL) {
+ rx_ring->sbq_clean_idx = clean_idx;
+ return;
+ }
+ skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
+ map = pci_map_single(qdev->pdev,
+ sbq_desc->p.skb->data,
+ rx_ring->sbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "PCI mapping failed.\n");
+ rx_ring->sbq_clean_idx = clean_idx;
+ dev_kfree_skb_any(sbq_desc->p.skb);
+ sbq_desc->p.skb = NULL;
+ return;
+ }
+ dma_unmap_addr_set(sbq_desc, mapaddr, map);
+ dma_unmap_len_set(sbq_desc, maplen,
+ rx_ring->sbq_buf_size);
+ *sbq_desc->addr = cpu_to_le64(map);
+ }
+
+ clean_idx++;
+ if (clean_idx == rx_ring->sbq_len)
+ clean_idx = 0;
+ }
+ rx_ring->sbq_clean_idx = clean_idx;
+ rx_ring->sbq_prod_idx += 16;
+ if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
+ rx_ring->sbq_prod_idx = 0;
+ rx_ring->sbq_free_cnt -= 16;
+ }
+
+ if (start_idx != clean_idx) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx);
+ ql_write_db_reg(rx_ring->sbq_prod_idx,
+ rx_ring->sbq_prod_idx_db_reg);
+ }
+}
+
+static void ql_update_buffer_queues(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ ql_update_sbq(qdev, rx_ring);
+ ql_update_lbq(qdev, rx_ring);
+}
+
+/* Unmaps tx buffers. Can be called from send() if a pci mapping
+ * fails at some stage, or from the interrupt when a tx completes.
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+ struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+ int i;
+ for (i = 0; i < mapped; i++) {
+ if (i == 0 || (i == 7 && mapped > 7)) {
+ /*
+ * Unmap the skb->data area, or the
+ * external sglist (AKA the Outbound
+ * Address List (OAL)).
+			 * If it's the zeroeth element, then it's
+			 * the skb->data area. If it's the 7th
+			 * element and there are more than 6 frags,
+			 * then it's an OAL.
+ */
+ if (i == 7) {
+ netif_printk(qdev, tx_done, KERN_DEBUG,
+ qdev->ndev,
+ "unmapping OAL area.\n");
+ }
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_ring_desc->map[i],
+ mapaddr),
+ dma_unmap_len(&tx_ring_desc->map[i],
+ maplen),
+ PCI_DMA_TODEVICE);
+ } else {
+ netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+ "unmapping frag %d.\n", i);
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_ring_desc->map[i],
+ mapaddr),
+ dma_unmap_len(&tx_ring_desc->map[i],
+ maplen), PCI_DMA_TODEVICE);
+ }
+ }
+}
+
+/* Map the buffers for this transmit. This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+ struct ob_mac_iocb_req *mac_iocb_ptr,
+ struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+ int len = skb_headlen(skb);
+ dma_addr_t map;
+ int frag_idx, err, map_idx = 0;
+ struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+ int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+ if (frag_cnt) {
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "frag_cnt = %d.\n", frag_cnt);
+ }
+ /*
+ * Map the skb buffer first.
+ */
+ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping failed with error: %d\n", err);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ tbd->len = cpu_to_le32(len);
+ tbd->addr = cpu_to_le64(map);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+ map_idx++;
+
+ /*
+ * This loop fills the remainder of the 8 address descriptors
+ * in the IOCB. If there are more than 7 fragments, then the
+ * eighth address desc will point to an external list (OAL).
+ * When this happens, the remainder of the frags will be stored
+ * in this list.
+ */
+ for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+ tbd++;
+ if (frag_idx == 6 && frag_cnt > 7) {
+ /* Let's tack on an sglist.
+ * Our control block will now
+ * look like this:
+ * iocb->seg[0] = skb->data
+ * iocb->seg[1] = frag[0]
+ * iocb->seg[2] = frag[1]
+ * iocb->seg[3] = frag[2]
+ * iocb->seg[4] = frag[3]
+ * iocb->seg[5] = frag[4]
+ * iocb->seg[6] = frag[5]
+ * iocb->seg[7] = ptr to OAL (external sglist)
+ * oal->seg[0] = frag[6]
+ * oal->seg[1] = frag[7]
+ * oal->seg[2] = frag[8]
+ * oal->seg[3] = frag[9]
+ * oal->seg[4] = frag[10]
+ * etc...
+ */
+ /* Tack on the OAL in the eighth segment of IOCB. */
+ map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+ sizeof(struct oal),
+ PCI_DMA_TODEVICE);
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ tbd->addr = cpu_to_le64(map);
+			/*
+			 * The length is the number of fragments
+			 * that remain to be mapped times the size
+			 * of a single OAL entry (a tx_buf_desc).
+			 * TX_DESC_C marks this descriptor as a
+			 * continuation (OAL) pointer.
+			 */
+ tbd->len =
+ cpu_to_le32((sizeof(struct tx_buf_desc) *
+ (frag_cnt - frag_idx)) | TX_DESC_C);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+ map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ sizeof(struct oal));
+ tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
+ map_idx++;
+ }
+
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping frags failed with error: %d.\n",
+ err);
+ goto map_error;
+ }
+
+ tbd->addr = cpu_to_le64(map);
+ tbd->len = cpu_to_le32(skb_frag_size(frag));
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ skb_frag_size(frag));
+
+ }
+ /* Save the number of segments we've mapped. */
+ tx_ring_desc->map_cnt = map_idx;
+ /* Terminate the last segment. */
+ tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+ return NETDEV_TX_OK;
+
+map_error:
+	/*
+	 * If the first frag mapping failed, then map_idx will be zero.
+	 * This causes the unmap of only the skb->data area. Otherwise
+	 * we pass in the number of frags that mapped successfully
+	 * so they can be unmapped.
+	 */
+ ql_unmap_send(qdev, tx_ring_desc, map_idx);
+ return NETDEV_TX_BUSY;
+}
+
+/* Categorize receive firmware frame errors */
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+ struct rx_ring *rx_ring)
+{
+ struct nic_stats *stats = &qdev->nic_stats;
+
+ stats->rx_err_count++;
+ rx_ring->rx_errors++;
+
+ switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
+ case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
+ stats->rx_code_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
+ stats->rx_oversize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
+ stats->rx_undersize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
+ stats->rx_preamble_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
+ stats->rx_frame_len_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_CRC:
+		stats->rx_crc_err++;
+		break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ return;
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
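+		/* tags[6] is the u16 at byte offset 12 (the outer
+		 * ethertype); tags[8] is at byte offset 16 (the inner
+		 * ethertype when a second tag is stacked).
+		 */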
+		if (tags[6] == htons(ETH_P_8021Q) &&
+		    tags[8] == htons(ETH_P_8021Q))
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
+ __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ goto err_out;
+ }
+
+ /* Update the MAC header length*/
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+	if (length > ndev->mtu + hlen) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Frame too long, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
+ memcpy(skb_put(skb, hlen), addr, hlen);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ if ((ndev->features & NETIF_F_RXCSUM) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph =
+ (struct iphdr *)((u8 *)addr + hlen);
+ if (!(iph->frag_off &
+ htons(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "UDP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if ((ndev->features & NETIF_F_RXCSUM) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ htons(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "UDP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ring->napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static void ql_realign_skb(struct sk_buff *skb, int len)
+{
+ void *temp_addr = skb->data;
+
+ /* Undo the skb_reserve(skb,32) we did before
+ * giving to hardware, and realign data on
+ * a 2-byte boundary.
+ */
+ skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+ skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+ skb_copy_to_linear_data(skb, temp_addr,
+ (unsigned int)len);
+}
+
+/*
+ * This function builds an skb for the given inbound
+ * completion. It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ struct bq_desc *lbq_desc;
+ struct bq_desc *sbq_desc;
+ struct sk_buff *skb = NULL;
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
+
+ /*
+ * Handle the header buffer if present.
+ */
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header of %d bytes in small buffer.\n", hdr_len);
+ /*
+ * Headers fit nicely into a small buffer.
+ */
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ skb = sbq_desc->p.skb;
+ ql_realign_skb(skb, hdr_len);
+ skb_put(skb, hdr_len);
+ sbq_desc->p.skb = NULL;
+ }
+
+ /*
+ * Handle the data buffer(s).
+ */
+ if (unlikely(!length)) { /* Is there data too? */
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No Data buffer in this packet.\n");
+ return skb;
+ }
+
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Headers in small, data of %d bytes in small, combine them.\n",
+ length);
+ /*
+ * Data is less than small buffer size so it's
+ * stuffed in a small buffer.
+ * For this case we append the data
+ * from the "data" small buffer to the "header" small
+ * buffer.
+ */
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr
+ (sbq_desc, mapaddr),
+ dma_unmap_len
+ (sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb_put(skb, length),
+ sbq_desc->p.skb->data, length);
+ pci_dma_sync_single_for_device(qdev->pdev,
+ dma_unmap_addr
+ (sbq_desc,
+ mapaddr),
+ dma_unmap_len
+ (sbq_desc,
+ maplen),
+ PCI_DMA_FROMDEVICE);
+ } else {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes in a single small buffer.\n",
+ length);
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ skb = sbq_desc->p.skb;
+ ql_realign_skb(skb, length);
+ skb_put(skb, length);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc,
+ mapaddr),
+ dma_unmap_len(sbq_desc,
+ maplen),
+ PCI_DMA_FROMDEVICE);
+ sbq_desc->p.skb = NULL;
+ }
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header in small, %d bytes in large. Chain large to small!\n",
+ length);
+ /*
+ * The data is in a single large buffer. We
+ * chain it to the header buffer's skb and let
+ * it rip.
+ */
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Chaining page at offset = %d, for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ } else {
+			/*
+			 * The headers and data are in a single large buffer.
+			 * We chain the page to a new skb and pull the headers
+			 * into its linear area. This can happen with
+			 * jumbo mtu on a non-TCP/UDP frame.
+			 */
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ skb = netdev_alloc_skb(qdev->ndev, length);
+ if (skb == NULL) {
+ netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop the packet.\n");
+ return NULL;
+ }
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(lbq_desc,
+ mapaddr),
+ dma_unmap_len(lbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ skb_reserve(skb, NET_IP_ALIGN);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
+ }
+ } else {
+		/*
+		 * The data is in a chain of large buffers
+		 * pointed to by a small buffer. We loop
+		 * through and chain them to our small header
+		 * buffer's skb.
+		 * frags: an skb takes at most 18 frags, while our
+		 * small buffer can hold 32 list entries. In practice
+		 * we use at most 3 for 9000-byte jumbo frames, but
+		 * if the MTU goes up we could eventually be in
+		 * trouble.
+		 */
+ int size, i = 0;
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+			/*
+			 * This is a non-TCP/UDP IP frame, so
+			 * the headers aren't split into a small
+			 * buffer. We have to use the small buffer
+			 * that contains our sg list as our skb to
+			 * send up the stack. Copy the sg list here to
+			 * a local buffer and use it to find the
+			 * pages to chain.
+			 */
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers & data in chain of large.\n",
+ length);
+ skb = sbq_desc->p.skb;
+ sbq_desc->p.skb = NULL;
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+ do {
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ size = (length < rx_ring->lbq_buf_size) ? length :
+ rx_ring->lbq_buf_size;
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Adding page %d to skb for %d bytes.\n",
+ i, size);
+ skb_fill_page_desc(skb, i,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+ length -= size;
+ i++;
+ } while (length > 0);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
+ }
+ return skb;
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+ if (unlikely(!skb)) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ prefetch(skb->data);
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ rx_ring->rx_multicast++;
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if ((ndev->features & NETIF_F_RXCSUM) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ htons(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ring->napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
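+	/* Extract the vlan tag from the IOCB only when hardware vlan
+	 * stripping is enabled; 0xffff is used below to mean "no tag".
+	 */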
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ /* Non-TCP/UDP large frames that span multiple buffers
+		 * can be processed correctly by the split frame logic.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ }
+
+ return (unsigned long)length;
+}
+
+/* Process an outbound completion from an rx ring. */
+static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+ struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct tx_ring *tx_ring;
+ struct tx_ring_desc *tx_ring_desc;
+
+ QL_DUMP_OB_MAC_RSP(mac_rsp);
+ tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+ tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+ ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+ tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
+ tx_ring->tx_packets++;
+ dev_kfree_skb(tx_ring_desc->skb);
+ tx_ring_desc->skb = NULL;
+
+ if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
+ OB_MAC_IOCB_RSP_S |
+ OB_MAC_IOCB_RSP_L |
+ OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Total descriptor length did not match transfer length.\n");
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too short to be valid, not sent.\n");
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too long, but sent anyway.\n");
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "PCI backplane error. Frame not sent.\n");
+ }
+ }
+ atomic_inc(&tx_ring->tx_count);
+}
+
+/* Fire up a handler to reset the MPI processor. */
+void ql_queue_fw_error(struct ql_adapter *qdev)
+{
+ ql_link_off(qdev);
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
+}
+
+void ql_queue_asic_error(struct ql_adapter *qdev)
+{
+ ql_link_off(qdev);
+ ql_disable_interrupts(qdev);
+ /* Clear adapter up bit to signal the recovery
+ * process that it shouldn't kill the reset worker
+ * thread
+ */
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ /* Set asic recovery bit to indicate reset process that we are
+ * in fatal error recovery process rather than normal close
+ */
+ set_bit(QL_ASIC_RECOVERY, &qdev->flags);
+ queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
+ struct ib_ae_iocb_rsp *ib_ae_rsp)
+{
+ switch (ib_ae_rsp->event) {
+ case MGMT_ERR_EVENT:
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Management Processor Fatal Error.\n");
+ ql_queue_fw_error(qdev);
+ return;
+
+ case CAM_LOOKUP_ERR_EVENT:
+ netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+ netdev_err(qdev->ndev, "This event shouldn't occur.\n");
+ ql_queue_asic_error(qdev);
+ return;
+
+ case SOFT_ECC_ERROR_EVENT:
+ netdev_err(qdev->ndev, "Soft ECC error detected.\n");
+ ql_queue_asic_error(qdev);
+ break;
+
+ case PCI_ERR_ANON_BUF_RD:
+ netdev_err(qdev->ndev, "PCI error occurred when reading "
+ "anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
+ ql_queue_asic_error(qdev);
+ break;
+
+ default:
+ netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+ ib_ae_rsp->event);
+ ql_queue_asic_error(qdev);
+ break;
+ }
+}
+
+static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct ob_mac_iocb_rsp *net_rsp = NULL;
+ int count = 0;
+
+ struct tx_ring *tx_ring;
+ /* While there are entries in the completion queue. */
+ while (prod != rx_ring->cnsmr_idx) {
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+
+ net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+ rmb();
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_TSO_IOCB:
+ case OPCODE_OB_MAC_IOCB:
+ ql_process_mac_tx_intr(qdev, net_rsp);
+ break;
+ default:
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ }
+ count++;
+ ql_update_cq(rx_ring);
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ }
+ if (!net_rsp)
+ return 0;
+ ql_write_cq_idx(rx_ring);
+ tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+ if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
+
+ return count;
+}
+
+static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct ql_net_rsp_iocb *net_rsp;
+ int count = 0;
+
+ /* While there are entries in the completion queue. */
+ while (prod != rx_ring->cnsmr_idx) {
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+
+ net_rsp = rx_ring->curr_entry;
+ rmb();
+ switch (net_rsp->opcode) {
+ case OPCODE_IB_MAC_IOCB:
+ ql_process_mac_rx_intr(qdev, rx_ring,
+ (struct ib_mac_iocb_rsp *)
+ net_rsp);
+ break;
+
+ case OPCODE_IB_AE_IOCB:
+ ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
+ net_rsp);
+ break;
+ default:
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ break;
+ }
+ count++;
+ ql_update_cq(rx_ring);
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ if (count == budget)
+ break;
+ }
+ ql_update_buffer_queues(qdev, rx_ring);
+ ql_write_cq_idx(rx_ring);
+ return count;
+}
+
+static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct rx_ring *trx_ring;
+ int i, work_done = 0;
+ struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
+
+ /* Service the TX rings first. They start
+ * right after the RSS rings. */
+ for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+ trx_ring = &qdev->rx_ring[i];
+ /* If this TX completion ring belongs to this vector and
+ * it's not empty then service it.
+ */
+ if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
+ (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+ trx_ring->cnsmr_idx)) {
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing TX completion ring %d.\n",
+ __func__, trx_ring->cq_id);
+ ql_clean_outbound_rx_ring(trx_ring);
+ }
+ }
+
+ /*
+ * Now service the RSS ring if it's active.
+ */
+ if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+ rx_ring->cnsmr_idx) {
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing RX completion ring %d.\n",
+ __func__, rx_ring->cq_id);
+ work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+ }
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ ql_enable_completion_interrupt(qdev, rx_ring->irq);
+ }
+ return work_done;
+}
+
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+ ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+ } else {
+ ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+ }
+}
+
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the requested features, to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status = 0;
+ bool need_restart = netif_running(ndev);
+
+ if (need_restart) {
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
+ }
+
+	/* Update the features with the requested change. */
+ ndev->features = features;
+
+ if (need_restart) {
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
+ }
+
+ return status;
+}
+
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ int err;
+
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
+
+ return features;
+}
+
+static int qlge_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ qlge_vlan_mode(ndev, features);
+
+ return 0;
+}
+
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+{
+ u32 enable_bit = MAC_ADDR_E;
+ int err;
+
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init vlan address.\n");
+ return err;
+}
+
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+ int err;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+
+ err = __qlge_vlan_rx_add_vid(qdev, vid);
+ set_bit(vid, qdev->active_vlans);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
+}
+
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+{
+ u32 enable_bit = 0;
+ int err;
+
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to clear vlan address.\n");
+ return err;
+}
+
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+ int err;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+
+ err = __qlge_vlan_rx_kill_vid(qdev, vid);
+ clear_bit(vid, qdev->active_vlans);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
+}
+
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+ int status;
+ u16 vid;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
+
+ for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
+ __qlge_vlan_rx_add_vid(qdev, vid);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
+static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
+{
+ struct rx_ring *rx_ring = dev_id;
+ napi_schedule(&rx_ring->napi);
+ return IRQ_HANDLED;
+}
+
+/* This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple vector environment.
+ * In an MSI/legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+static irqreturn_t qlge_isr(int irq, void *dev_id)
+{
+ struct rx_ring *rx_ring = dev_id;
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+ u32 var;
+ int work_done = 0;
+
+ spin_lock(&qdev->hw_lock);
+ if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "Shared Interrupt, Not ours!\n");
+ spin_unlock(&qdev->hw_lock);
+ return IRQ_NONE;
+ }
+ spin_unlock(&qdev->hw_lock);
+
+ var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+
+ /*
+ * Check for fatal error.
+ */
+ if (var & STS_FE) {
+ ql_queue_asic_error(qdev);
+ netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
+ var = ql_read32(qdev, ERR_STS);
+ netdev_err(qdev->ndev, "Resetting chip. "
+ "Error Status Register = 0x%x\n", var);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Check MPI processor activity.
+ */
+ if ((var & STS_PI) &&
+ (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+ /*
+ * We've got an async event or mailbox completion.
+ * Handle it and clear the source of the interrupt.
+ */
+ netif_err(qdev, intr, qdev->ndev,
+ "Got MPI processor interrupt.\n");
+ ql_disable_completion_interrupt(qdev, intr_context->intr);
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work_on(smp_processor_id(),
+ qdev->workqueue, &qdev->mpi_work, 0);
+ work_done++;
+ }
+
+ /*
+ * Get the bit-mask that shows the active queues for this
+ * pass. Compare it to the queues that this irq services
+ * and call napi if there's a match.
+ */
+ var = ql_read32(qdev, ISR1);
+ if (var & intr_context->irq_mask) {
+ netif_info(qdev, intr, qdev->ndev,
+ "Waking handler for rx_ring[0].\n");
+ ql_disable_completion_interrupt(qdev, intr_context->intr);
+ napi_schedule(&rx_ring->napi);
+ work_done++;
+ }
+ ql_enable_completion_interrupt(qdev, intr_context->intr);
+ return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
+ if (skb_is_gso(skb)) {
+ int err;
+ __be16 l3_proto = vlan_get_protocol(skb);
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+ mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->total_hdrs_len =
+ cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ mac_iocb_ptr->net_trans_offset =
+ cpu_to_le16(skb_network_offset(skb) |
+ skb_transport_offset(skb)
+ << OB_MAC_TRANSPORT_HDR_SHIFT);
+ mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
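+		/* Seed the TCP checksum with the pseudo-header sum; the
+		 * hardware fills in the per-segment checksum on transmit.
+		 */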
+ if (likely(l3_proto == htons(ETH_P_IP))) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->check = 0;
+ mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
+ mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+ struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+ int len;
+ struct iphdr *iph = ip_hdr(skb);
+ __sum16 *check;
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->net_trans_offset =
+ cpu_to_le16(skb_network_offset(skb) |
+ skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+ mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+ len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+ if (likely(iph->protocol == IPPROTO_TCP)) {
+ check = &(tcp_hdr(skb)->check);
+ mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+ mac_iocb_ptr->total_hdrs_len =
+ cpu_to_le16(skb_transport_offset(skb) +
+ (tcp_hdr(skb)->doff << 2));
+ } else {
+ check = &(udp_hdr(skb)->check);
+ mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+ mac_iocb_ptr->total_hdrs_len =
+ cpu_to_le16(skb_transport_offset(skb) +
+ sizeof(struct udphdr));
+ }
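+	/* Seed the L4 checksum field with the pseudo-header sum; the
+	 * hardware folds in the payload checksum on transmit.
+	 */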
+ *check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, len, iph->protocol, 0);
+}
+
+static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct tx_ring_desc *tx_ring_desc;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int tso;
+ struct tx_ring *tx_ring;
+ u32 tx_ring_idx = (u32) skb->queue_mapping;
+
+ tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_info(qdev, tx_queued, qdev->ndev,
+ "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
+ __func__, tx_ring_idx);
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ tx_ring->tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+ tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+ mac_iocb_ptr = tx_ring_desc->queue_entry;
+ memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+ mac_iocb_ptr->tid = tx_ring_desc->index;
+	/* We store the tx queue index for this IO in the IOCB so the
+	 * completion handler can establish the context.
+	 */
+ mac_iocb_ptr->txq_idx = tx_ring_idx;
+ tx_ring_desc->skb = skb;
+
+ mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+
+ if (skb_vlan_tag_present(skb)) {
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
+ mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
+ mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ }
+ tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ ql_hw_csum_setup(skb,
+ (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+ }
+ if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+ NETDEV_TX_OK) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "Could not map the segments.\n");
+ tx_ring->tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+ QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
+ tx_ring->prod_idx++;
+ if (tx_ring->prod_idx == tx_ring->wq_len)
+ tx_ring->prod_idx = 0;
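+	/* Make sure the IOCB contents are visible to the device
+	 * before the doorbell write below.
+	 */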
+ wmb();
+
+ ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "tx queued, slot %d, len %d\n",
+ tx_ring->prod_idx, skb->len);
+
+ atomic_dec(&tx_ring->tx_count);
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
+ return NETDEV_TX_OK;
+}
+
+
+static void ql_free_shadow_space(struct ql_adapter *qdev)
+{
+ if (qdev->rx_ring_shadow_reg_area) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
+ qdev->rx_ring_shadow_reg_area = NULL;
+ }
+ if (qdev->tx_ring_shadow_reg_area) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->tx_ring_shadow_reg_area,
+ qdev->tx_ring_shadow_reg_dma);
+ qdev->tx_ring_shadow_reg_area = NULL;
+ }
+}
+
+static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+{
+ qdev->rx_ring_shadow_reg_area =
+ pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
+ &qdev->rx_ring_shadow_reg_dma);
+ if (qdev->rx_ring_shadow_reg_area == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of RX shadow space failed.\n");
+ return -ENOMEM;
+ }
+
+ qdev->tx_ring_shadow_reg_area =
+ pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
+ &qdev->tx_ring_shadow_reg_dma);
+ if (qdev->tx_ring_shadow_reg_area == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of TX shadow space failed.\n");
+ goto err_wqp_sh_area;
+ }
+ return 0;
+
+err_wqp_sh_area:
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
+ return -ENOMEM;
+}
+
+static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+ struct tx_ring_desc *tx_ring_desc;
+ int i;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+
+ mac_iocb_ptr = tx_ring->wq_base;
+ tx_ring_desc = tx_ring->q;
+ for (i = 0; i < tx_ring->wq_len; i++) {
+ tx_ring_desc->index = i;
+ tx_ring_desc->skb = NULL;
+ tx_ring_desc->queue_entry = mac_iocb_ptr;
+ mac_iocb_ptr++;
+ tx_ring_desc++;
+ }
+ atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
+}
+
+static void ql_free_tx_resources(struct ql_adapter *qdev,
+ struct tx_ring *tx_ring)
+{
+ if (tx_ring->wq_base) {
+ pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+ }
+ kfree(tx_ring->q);
+ tx_ring->q = NULL;
+}
+
+static int ql_alloc_tx_resources(struct ql_adapter *qdev,
+ struct tx_ring *tx_ring)
+{
+ tx_ring->wq_base =
+ pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
+ &tx_ring->wq_base_dma);
+
+	if ((tx_ring->wq_base == NULL) ||
+	    (tx_ring->wq_base_dma & WQ_ADDR_ALIGN))
+ goto pci_alloc_err;
+
+ tx_ring->q =
+ kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
+ if (tx_ring->q == NULL)
+ goto err;
+
+ return 0;
+err:
+ pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+pci_alloc_err:
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
+ return -ENOMEM;
+}
+
+static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+
+ uint32_t curr_idx, clean_idx;
+
+ curr_idx = rx_ring->lbq_curr_idx;
+ clean_idx = rx_ring->lbq_clean_idx;
+ while (curr_idx != clean_idx) {
+ lbq_desc = &rx_ring->lbq[curr_idx];
+
+ if (lbq_desc->p.pg_chunk.last_flag) {
+ pci_unmap_page(qdev->pdev,
+ lbq_desc->p.pg_chunk.map,
+ ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ lbq_desc->p.pg_chunk.last_flag = 0;
+ }
+
+ put_page(lbq_desc->p.pg_chunk.page);
+ lbq_desc->p.pg_chunk.page = NULL;
+
+ if (++curr_idx == rx_ring->lbq_len)
+ curr_idx = 0;
+
+ }
+ if (rx_ring->pg_chunk.page) {
+ pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ put_page(rx_ring->pg_chunk.page);
+ rx_ring->pg_chunk.page = NULL;
+ }
+}
+
+static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *sbq_desc;
+
+ for (i = 0; i < rx_ring->sbq_len; i++) {
+ sbq_desc = &rx_ring->sbq[i];
+ if (sbq_desc == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "sbq_desc %d is NULL.\n", i);
+ return;
+ }
+ if (sbq_desc->p.skb) {
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(sbq_desc->p.skb);
+ sbq_desc->p.skb = NULL;
+ }
+ }
+}
+
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+ int i;
+ struct rx_ring *rx_ring;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ if (rx_ring->lbq)
+ ql_free_lbq_buffers(qdev, rx_ring);
+ if (rx_ring->sbq)
+ ql_free_sbq_buffers(qdev, rx_ring);
+ }
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+ struct rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ if (rx_ring->type != TX_Q)
+ ql_update_buffer_queues(qdev, rx_ring);
+ }
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *lbq_desc;
+ __le64 *bq = rx_ring->lbq_base;
+
+ memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+ for (i = 0; i < rx_ring->lbq_len; i++) {
+ lbq_desc = &rx_ring->lbq[i];
+ memset(lbq_desc, 0, sizeof(*lbq_desc));
+ lbq_desc->index = i;
+ lbq_desc->addr = bq;
+ bq++;
+ }
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *sbq_desc;
+ __le64 *bq = rx_ring->sbq_base;
+
+ memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
+ for (i = 0; i < rx_ring->sbq_len; i++) {
+ sbq_desc = &rx_ring->sbq[i];
+ memset(sbq_desc, 0, sizeof(*sbq_desc));
+ sbq_desc->index = i;
+ sbq_desc->addr = bq;
+ bq++;
+ }
+}
+
+static void ql_free_rx_resources(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ /* Free the small buffer queue. */
+ if (rx_ring->sbq_base) {
+ pci_free_consistent(qdev->pdev,
+ rx_ring->sbq_size,
+ rx_ring->sbq_base, rx_ring->sbq_base_dma);
+ rx_ring->sbq_base = NULL;
+ }
+
+ /* Free the small buffer queue control blocks. */
+ kfree(rx_ring->sbq);
+ rx_ring->sbq = NULL;
+
+ /* Free the large buffer queue. */
+ if (rx_ring->lbq_base) {
+ pci_free_consistent(qdev->pdev,
+ rx_ring->lbq_size,
+ rx_ring->lbq_base, rx_ring->lbq_base_dma);
+ rx_ring->lbq_base = NULL;
+ }
+
+ /* Free the large buffer queue control blocks. */
+ kfree(rx_ring->lbq);
+ rx_ring->lbq = NULL;
+
+ /* Free the rx queue. */
+ if (rx_ring->cq_base) {
+ pci_free_consistent(qdev->pdev,
+ rx_ring->cq_size,
+ rx_ring->cq_base, rx_ring->cq_base_dma);
+ rx_ring->cq_base = NULL;
+ }
+}
+
+/* Allocate queues and buffers for this completion queue based
+ * on the values in the parameter structure. */
+static int ql_alloc_rx_resources(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+
+ /*
+ * Allocate the completion queue for this rx_ring.
+ */
+ rx_ring->cq_base =
+ pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
+ &rx_ring->cq_base_dma);
+
+ if (rx_ring->cq_base == NULL) {
+ netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ if (rx_ring->sbq_len) {
+ /*
+ * Allocate small buffer queue.
+ */
+ rx_ring->sbq_base =
+ pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
+ &rx_ring->sbq_base_dma);
+
+ if (rx_ring->sbq_base == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue allocation failed.\n");
+ goto err_mem;
+ }
+
+ /*
+ * Allocate small buffer queue control blocks.
+ */
+ rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL)
+ goto err_mem;
+
+ ql_init_sbq_ring(qdev, rx_ring);
+ }
+
+ if (rx_ring->lbq_len) {
+ /*
+ * Allocate large buffer queue.
+ */
+ rx_ring->lbq_base =
+ pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
+ &rx_ring->lbq_base_dma);
+
+ if (rx_ring->lbq_base == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue allocation failed.\n");
+ goto err_mem;
+ }
+ /*
+ * Allocate large buffer queue control blocks.
+ */
+ rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL)
+ goto err_mem;
+
+ ql_init_lbq_ring(qdev, rx_ring);
+ }
+
+ return 0;
+
+err_mem:
+ ql_free_rx_resources(qdev, rx_ring);
+ return -ENOMEM;
+}
+
+static void ql_tx_ring_clean(struct ql_adapter *qdev)
+{
+ struct tx_ring *tx_ring;
+ struct tx_ring_desc *tx_ring_desc;
+ int i, j;
+
+ /*
+ * Loop through all queues and free
+ * any resources.
+ */
+ for (j = 0; j < qdev->tx_ring_count; j++) {
+ tx_ring = &qdev->tx_ring[j];
+ for (i = 0; i < tx_ring->wq_len; i++) {
+ tx_ring_desc = &tx_ring->q[i];
+ if (tx_ring_desc && tx_ring_desc->skb) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Freeing lost SKB %p, from queue %d, index %d.\n",
+ tx_ring_desc->skb, j,
+ tx_ring_desc->index);
+ ql_unmap_send(qdev, tx_ring_desc,
+ tx_ring_desc->map_cnt);
+ dev_kfree_skb(tx_ring_desc->skb);
+ tx_ring_desc->skb = NULL;
+ }
+ }
+ }
+}
+
+static void ql_free_mem_resources(struct ql_adapter *qdev)
+{
+ int i;
+
+ for (i = 0; i < qdev->tx_ring_count; i++)
+ ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+ for (i = 0; i < qdev->rx_ring_count; i++)
+ ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
+ ql_free_shadow_space(qdev);
+}
+
+static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+{
+ int i;
+
+ /* Allocate space for our shadow registers and such. */
+ if (ql_alloc_shadow_space(qdev))
+ return -ENOMEM;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "RX resource allocation failed.\n");
+ goto err_mem;
+ }
+ }
+ /* Allocate tx queue resources */
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "TX resource allocation failed.\n");
+ goto err_mem;
+ }
+ }
+ return 0;
+
+err_mem:
+ ql_free_mem_resources(qdev);
+ return -ENOMEM;
+}
+
+/* Set up the rx ring control block and pass it to the chip.
+ * The control block is defined as
+ * "Completion Queue Initialization Control Block", or cqicb.
+ */
+static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ struct cqicb *cqicb = &rx_ring->cqicb;
+ void *shadow_reg = qdev->rx_ring_shadow_reg_area +
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+ u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+ void __iomem *doorbell_area =
+ qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+ int err = 0;
+ u16 bq_len;
+ u64 tmp;
+ __le64 *base_indirect_ptr;
+ int page_entries;
+
+ /* Set up the shadow registers for this ring. */
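+	/* Per-ring shadow layout: one u64 completion producer index,
+	 * followed by the lbq and sbq base-address indirection lists.
+	 */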
+ rx_ring->prod_idx_sh_reg = shadow_reg;
+ rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+ *rx_ring->prod_idx_sh_reg = 0;
+ shadow_reg += sizeof(u64);
+ shadow_reg_dma += sizeof(u64);
+ rx_ring->lbq_base_indirect = shadow_reg;
+ rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ rx_ring->sbq_base_indirect = shadow_reg;
+ rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+
+ /* PCI doorbell mem area + 0x00 for consumer index register */
+ rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
+ rx_ring->cnsmr_idx = 0;
+ rx_ring->curr_entry = rx_ring->cq_base;
+
+ /* PCI doorbell mem area + 0x04 for valid register */
+ rx_ring->valid_db_reg = doorbell_area + 0x04;
+
+ /* PCI doorbell mem area + 0x18 for large buffer consumer */
+ rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+
+ /* PCI doorbell mem area + 0x1c */
+ rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+
+ memset((void *)cqicb, 0, sizeof(struct cqicb));
+ cqicb->msix_vect = rx_ring->irq;
+
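+	/* The hardware length fields are 16 bits wide, so a queue
+	 * length of 65536 is encoded as 0.
+	 */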
+ bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
+ cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+
+ cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
+
+ cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
+
+ /*
+ * Set up the control block load flags.
+ */
+ cqicb->flags = FLAGS_LC | /* Load queue base address */
+ FLAGS_LV | /* Load MSI-X vector */
+ FLAGS_LI; /* Load irq delay values */
+ if (rx_ring->lbq_len) {
+ cqicb->flags |= FLAGS_LL; /* Load lbq values */
+ tmp = (u64)rx_ring->lbq_base_dma;
+ base_indirect_ptr = rx_ring->lbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ cqicb->lbq_addr =
+ cpu_to_le64(rx_ring->lbq_base_indirect_dma);
+ bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
+ (u16) rx_ring->lbq_buf_size;
+ cqicb->lbq_buf_size = cpu_to_le16(bq_len);
+ bq_len = (rx_ring->lbq_len == 65536) ? 0 :
+ (u16) rx_ring->lbq_len;
+ cqicb->lbq_len = cpu_to_le16(bq_len);
+ rx_ring->lbq_prod_idx = 0;
+ rx_ring->lbq_curr_idx = 0;
+ rx_ring->lbq_clean_idx = 0;
+ rx_ring->lbq_free_cnt = rx_ring->lbq_len;
+ }
+ if (rx_ring->sbq_len) {
+ cqicb->flags |= FLAGS_LS; /* Load sbq values */
+ tmp = (u64)rx_ring->sbq_base_dma;
+ base_indirect_ptr = rx_ring->sbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+ cqicb->sbq_addr =
+ cpu_to_le64(rx_ring->sbq_base_indirect_dma);
+ cqicb->sbq_buf_size =
+ cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+ bq_len = (rx_ring->sbq_len == 65536) ? 0 :
+ (u16) rx_ring->sbq_len;
+ cqicb->sbq_len = cpu_to_le16(bq_len);
+ rx_ring->sbq_prod_idx = 0;
+ rx_ring->sbq_curr_idx = 0;
+ rx_ring->sbq_clean_idx = 0;
+ rx_ring->sbq_free_cnt = rx_ring->sbq_len;
+ }
+ switch (rx_ring->type) {
+ case TX_Q:
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
+ break;
+ case RX_Q:
+ /* Inbound completion handling rx_rings run in
+ * separate NAPI contexts.
+ */
+ netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
+ 64);
+ cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
+ break;
+ default:
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Invalid rx_ring->type = %d.\n", rx_ring->type);
+ }
+ err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+ CFG_LCQ, rx_ring->cq_id);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
+ return err;
+ }
+ return err;
+}
+
+static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+ struct wqicb *wqicb = (struct wqicb *)tx_ring;
+ void __iomem *doorbell_area =
+ qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+ void *shadow_reg = qdev->tx_ring_shadow_reg_area +
+ (tx_ring->wq_id * sizeof(u64));
+ u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
+ (tx_ring->wq_id * sizeof(u64));
+ int err = 0;
+
+ /*
+ * Assign doorbell registers for this tx_ring.
+ */
+ /* TX PCI doorbell mem area for tx producer index */
+ tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
+ tx_ring->prod_idx = 0;
+ /* TX PCI doorbell mem area + 0x04 */
+ tx_ring->valid_db_reg = doorbell_area + 0x04;
+
+ /*
+ * Assign shadow registers for this tx_ring.
+ */
+ tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+ tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+
+ wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+ wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+ Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+ wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
+ wqicb->rid = 0;
+ wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
+
+ wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
+
+ ql_init_tx_ring(qdev, tx_ring);
+
+ err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+ (u16) tx_ring->wq_id);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
+ return err;
+ }
+ return err;
+}
+
+static void ql_disable_msix(struct ql_adapter *qdev)
+{
+ if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+ pci_disable_msix(qdev->pdev);
+ clear_bit(QL_MSIX_ENABLED, &qdev->flags);
+ kfree(qdev->msi_x_entry);
+ qdev->msi_x_entry = NULL;
+ } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ pci_disable_msi(qdev->pdev);
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ }
+}
+
+/* We try to get qdev->intr_count MSI-X vectors; the PCI core may
+ * grant us anywhere between 1 and that many. If MSI-X fails we
+ * fall back to MSI, then to legacy interrupts.
+ */
+static void ql_enable_msix(struct ql_adapter *qdev)
+{
+ int i, err;
+
+ /* Get the MSIX vectors. */
+ if (qlge_irq_type == MSIX_IRQ) {
+ /* Try to alloc space for the msix struct,
+ * if it fails then go to MSI/legacy.
+ */
+ qdev->msi_x_entry = kcalloc(qdev->intr_count,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!qdev->msi_x_entry) {
+ qlge_irq_type = MSI_IRQ;
+ goto msi;
+ }
+
+ for (i = 0; i < qdev->intr_count; i++)
+ qdev->msi_x_entry[i].entry = i;
+
+ err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+ 1, qdev->intr_count);
+ if (err < 0) {
+ kfree(qdev->msi_x_entry);
+ qdev->msi_x_entry = NULL;
+ netif_warn(qdev, ifup, qdev->ndev,
+ "MSI-X Enable failed, trying MSI.\n");
+ qlge_irq_type = MSI_IRQ;
+ } else {
+ qdev->intr_count = err;
+ set_bit(QL_MSIX_ENABLED, &qdev->flags);
+ netif_info(qdev, ifup, qdev->ndev,
+ "MSI-X Enabled, got %d vectors.\n",
+ qdev->intr_count);
+ return;
+ }
+ }
+msi:
+ qdev->intr_count = 1;
+ if (qlge_irq_type == MSI_IRQ) {
+ if (!pci_enable_msi(qdev->pdev)) {
+ set_bit(QL_MSI_ENABLED, &qdev->flags);
+ netif_info(qdev, ifup, qdev->ndev,
+ "Running with MSI interrupts.\n");
+ return;
+ }
+ }
+ qlge_irq_type = LEG_IRQ;
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Running with legacy interrupts.\n");
+}
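+
+/* Illustrative sketch (not driver logic, just a walk-through of the
+ * fallback above): assuming an 8-CPU host and qlge_irq_type == MSIX_IRQ,
+ *
+ *	request: qdev->intr_count = 8
+ *	pci_enable_msix_range(pdev, entries, 1, 8) -> e.g. returns 4
+ *	qdev->intr_count = 4, QL_MSIX_ENABLED set
+ *
+ * If the range call fails outright we drop to a single MSI vector, and
+ * if pci_enable_msi() fails too, to one shared legacy INTx interrupt.
+ */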
+
+/* Each vector services 1 RSS ring and 1 or more
+ * TX completion rings. This function loops through
+ * the TX completion rings and assigns the vector that
+ * will service it. An example would be if there are
+ * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
+ * This would mean that vector 0 would service RSS ring 0
+ * and TX completion rings 0,1,2 and 3. Vector 1 would
+ * service RSS ring 1 and TX completion rings 4,5,6 and 7.
+ */
+static void ql_set_tx_vect(struct ql_adapter *qdev)
+{
+ int i, j, vect;
+ u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+ /* Assign irq vectors to TX rx_rings.*/
+ for (vect = 0, j = 0, i = qdev->rss_ring_count;
+ i < qdev->rx_ring_count; i++) {
+ if (j == tx_rings_per_vector) {
+ vect++;
+ j = 0;
+ }
+ qdev->rx_ring[i].irq = vect;
+ j++;
+ }
+ } else {
+ /* For a single vector all rings have an irq
+ * of zero.
+ */
+ for (i = 0; i < qdev->rx_ring_count; i++)
+ qdev->rx_ring[i].irq = 0;
+ }
+}
+
+/* Set the interrupt mask for this vector. Each vector
+ * will service 1 RSS ring and 1 or more TX completion
+ * rings. This function sets up a bit mask per vector
+ * that indicates which rings it services.
+ */
+static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
+{
+ int j, vect = ctx->intr;
+ u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+ /* Add the RSS ring serviced by this vector
+ * to the mask.
+ */
+ ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
+ /* Add the TX ring(s) serviced by this vector
+ * to the mask. */
+ for (j = 0; j < tx_rings_per_vector; j++) {
+ ctx->irq_mask |=
+ (1 << qdev->rx_ring[qdev->rss_ring_count +
+ (vect * tx_rings_per_vector) + j].cq_id);
+ }
+ } else {
+ /* For a single vector we just set each queue's
+ * ID bit in the mask.
+ */
+ for (j = 0; j < qdev->rx_ring_count; j++)
+ ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
+ }
+}
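+
+/* Worked example (a sketch, reusing the 2-vector/8-TX-ring layout from
+ * the comment above ql_set_tx_vect): tx_rings_per_vector = 8 / 2 = 4,
+ * and with rss_ring_count = 2 the TX completion rings occupy cq_ids
+ * 2..9. Vector 0 then covers cq_id 0 (its RSS ring) plus cq_ids 2..5:
+ *
+ *	ctx->irq_mask = (1 << 0) | (1 << 2) | (1 << 3) |
+ *			(1 << 4) | (1 << 5);
+ *
+ * while vector 1 gets cq_id 1 plus cq_ids 6..9.
+ */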
+
+/*
+ * Here we build the intr_context structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_context structure is used to hook each vector
+ * to possibly different handlers.
+ */
+static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+{
+ int i = 0;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+ /* Each rx_ring has its
+ * own intr_context since we have separate
+ * vectors for each queue.
+ */
+ for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+ qdev->rx_ring[i].irq = i;
+ intr_context->intr = i;
+ intr_context->qdev = qdev;
+ /* Set up this vector's bit-mask that indicates
+ * which queues it services.
+ */
+ ql_set_irq_mask(qdev, intr_context);
+ /*
+ * We set up each vector's enable/disable/read bits so
+ * there are no bit/mask calculations in the critical path.
+ */
+ intr_context->intr_en_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
+ | i;
+ intr_context->intr_dis_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+ INTR_EN_IHD | i;
+ intr_context->intr_read_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
+ i;
+ if (i == 0) {
+ /* The first vector/queue handles
+ * broadcast/multicast, fatal errors,
+ * and firmware events. This is in addition
+ * to normal inbound NAPI processing.
+ */
+ intr_context->handler = qlge_isr;
+ sprintf(intr_context->name, "%s-rx-%d",
+ qdev->ndev->name, i);
+ } else {
+ /*
+ * Inbound queues handle unicast frames only.
+ */
+ intr_context->handler = qlge_msix_rx_isr;
+ sprintf(intr_context->name, "%s-rx-%d",
+ qdev->ndev->name, i);
+ }
+ }
+ } else {
+ /*
+ * All rx_rings use the same intr_context since
+ * there is only one vector.
+ */
+ intr_context->intr = 0;
+ intr_context->qdev = qdev;
+ /*
+ * We set up each vector's enable/disable/read bits so
+ * there are no bit/mask calculations in the critical path.
+ */
+ intr_context->intr_en_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
+ intr_context->intr_dis_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE;
+ intr_context->intr_read_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
+ /*
+ * Single interrupt means one handler for all rings.
+ */
+ intr_context->handler = qlge_isr;
+ sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
+ /* Set up this vector's bit-mask that indicates
+ * which queues it services. In this case there is
+ * a single vector so it will service all RSS and
+ * TX completion rings.
+ */
+ ql_set_irq_mask(qdev, intr_context);
+ }
+ /* Tell the TX completion rings which MSIx vector
+ * they will be using.
+ */
+ ql_set_tx_vect(qdev);
+}
+
+static void ql_free_irq(struct ql_adapter *qdev)
+{
+ int i;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+
+ for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+ if (intr_context->hooked) {
+ if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+ free_irq(qdev->msi_x_entry[i].vector,
+ &qdev->rx_ring[i]);
+ } else {
+ free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
+ }
+ }
+ }
+ ql_disable_msix(qdev);
+}
+
+static int ql_request_irq(struct ql_adapter *qdev)
+{
+ int i;
+ int status = 0;
+ struct pci_dev *pdev = qdev->pdev;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+
+ ql_resolve_queues_to_irqs(qdev);
+
+ for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+ atomic_set(&intr_context->irq_cnt, 0);
+ if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+ status = request_irq(qdev->msi_x_entry[i].vector,
+ intr_context->handler,
+ 0,
+ intr_context->name,
+ &qdev->rx_ring[i]);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed request for MSIX interrupt %d.\n",
+ i);
+ goto err_irq;
+ }
+ } else {
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "trying msi or legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: irq = %d.\n", __func__, pdev->irq);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: context->name = %s.\n", __func__,
+ intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: dev_id = 0x%p.\n", __func__,
+ &qdev->rx_ring[0]);
+ status = request_irq(pdev->irq, qlge_isr,
+ test_bit(QL_MSI_ENABLED, &qdev->flags)
+ ? 0 : IRQF_SHARED,
+ intr_context->name, &qdev->rx_ring[0]);
+ if (status)
+ goto err_irq;
+
+ netif_err(qdev, ifup, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[0].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+ qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ intr_context->name);
+ }
+ intr_context->hooked = 1;
+ }
+ return status;
+err_irq:
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
+ ql_free_irq(qdev);
+ return status;
+}
+
+static int ql_start_rss(struct ql_adapter *qdev)
+{
+ static const u8 init_hash_seed[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+ };
+ struct ricb *ricb = &qdev->ricb;
+ int status = 0;
+ int i;
+ u8 *hash_id = (u8 *) ricb->hash_cq_id;
+
+ memset((void *)ricb, 0, sizeof(*ricb));
+
+ ricb->base_cq = RSS_L4K;
+ ricb->flags =
+ (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
+ ricb->mask = cpu_to_le16((u16)(0x3ff));
+
+ /*
+ * Fill out the Indirection Table.
+ */
+ for (i = 0; i < 1024; i++)
+ hash_id[i] = (i & (qdev->rss_ring_count - 1));
+
+ memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
+ memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
+
+ status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
+ return status;
+ }
+ return status;
+}
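+
+/* A note on the indirection math above (a sketch, not a spec): filling
+ * the table with (i & (rss_ring_count - 1)) only spreads hash values
+ * evenly when rss_ring_count is a power of two. With 4 RSS rings the
+ * 1024-entry table repeats the pattern:
+ *
+ *	hash_id[0..7] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+ */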
+
+static int ql_clear_routing_entries(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+ /* Clear all the entries in the routing table. */
+ for (i = 0; i < 16; i++) {
+ status = ql_set_routing_reg(qdev, i, 0, 0);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
+ break;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+ int status = 0;
+
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status)
+ return status;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
+ RT_IDX_IP_CSUM_ERR, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register "
+ "for IP CSUM error packets.\n");
+ goto exit;
+ }
+ status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
+ RT_IDX_TU_CSUM_ERR, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register "
+ "for TCP/UDP CSUM error packets.\n");
+ goto exit;
+ }
+ status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for broadcast packets.\n");
+ goto exit;
+ }
+ /* If we have more than one inbound queue, then turn on RSS in the
+ * routing block.
+ */
+ if (qdev->rss_ring_count > 1) {
+ status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+ RT_IDX_RSS_MATCH, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for MATCH RSS packets.\n");
+ goto exit;
+ }
+ }
+
+ status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+ RT_IDX_CAM_HIT, 1);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
+exit:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+int ql_cam_route_initialize(struct ql_adapter *qdev)
+{
+ int status, set;
+
+ /* Check if the link is up and use that to
+ * determine if we are setting or clearing
+ * the MAC address in the CAM.
+ */
+ set = ql_read32(qdev, STS);
+ set &= qdev->port_link_up;
+ status = ql_set_mac_addr(qdev, set);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
+ return status;
+ }
+
+ status = ql_route_initialize(qdev);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
+
+ return status;
+}
+
+static int ql_adapter_initialize(struct ql_adapter *qdev)
+{
+ u32 value, mask;
+ int i;
+ int status = 0;
+
+ /*
+ * Set up the System register to halt on errors.
+ */
+ value = SYS_EFE | SYS_FAE;
+ mask = value << 16;
+ ql_write32(qdev, SYS, mask | value);
+
+ /* Set the default queue, and VLAN behavior. */
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ value |= NIC_RCV_CFG_RV;
+ mask |= (NIC_RCV_CFG_RV << 16);
+ }
+ ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+
+ /* Set the MPI interrupt to enabled. */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+
+ /* Enable the function, set pagesize, enable error checking. */
+ value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
+
+ /* Set/clear header splitting. */
+ mask = FSC_VM_PAGESIZE_MASK |
+ FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+ ql_write32(qdev, FSC, mask | value);
+
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
+
+ /* Set RX packet routing to use the port/PCI function on
+ * which the packet arrived, in addition to the usual frame
+ * routing. This is helpful with bonding, where both interfaces can have
+ * the same MAC address.
+ */
+ ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+ /* Reroute all packets to our Interface.
+ * They may have been routed to MPI firmware
+ * due to WOL.
+ */
+ value = ql_read32(qdev, MGMT_RCV_CFG);
+ value &= ~MGMT_RCV_CFG_RM;
+ mask = 0xffff0000;
+
+ /* Sticky reg needs clearing due to WOL. */
+ ql_write32(qdev, MGMT_RCV_CFG, mask);
+ ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+
+ /* WOL is enabled by default on Mezz cards */
+ if (qdev->pdev->subsystem_device == 0x0068 ||
+ qdev->pdev->subsystem_device == 0x0180)
+ qdev->wol = WAKE_MAGIC;
+
+ /* Start up the rx queues. */
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start rx ring[%d].\n", i);
+ return status;
+ }
+ }
+
+ /* If there is more than one inbound completion queue
+ * then download a RICB to configure RSS.
+ */
+ if (qdev->rss_ring_count > 1) {
+ status = ql_start_rss(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
+ return status;
+ }
+ }
+
+ /* Start up the tx queues. */
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start tx ring[%d].\n", i);
+ return status;
+ }
+ }
+
+ /* Initialize the port and set the max framesize. */
+ status = qdev->nic_ops->port_initialize(qdev);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
+
+ /* Set up the MAC address and frame routing filter. */
+ status = ql_cam_route_initialize(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
+ return status;
+ }
+
+ /* Start NAPI for the RSS queues. */
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ napi_enable(&qdev->rx_ring[i].napi);
+
+ return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+ u32 value;
+ int status = 0;
+ unsigned long end_jiffies;
+
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
+ return status;
+ }
+
+ end_jiffies = jiffies +
+ max((unsigned long)1, usecs_to_jiffies(30));
+
+ /* If the recovery bit is set, skip the mailbox command and
+ * clear the bit; otherwise we are in the normal reset process.
+ */
+ if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+ /* Stop management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+ /* Wait for the NIC and MGMNT FIFOs to empty. */
+ ql_wait_fifo_empty(qdev);
+ } else
+ clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+
+ ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+
+ do {
+ value = ql_read32(qdev, RST_FO);
+ if ((value & RST_FO_FR) == 0)
+ break;
+ cpu_relax();
+ } while (time_before(jiffies, end_jiffies));
+
+ if (value & RST_FO_FR) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
+ status = -ETIMEDOUT;
+ }
+
+ /* Resume management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
+ return status;
+}
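+
+/* Timing sketch for the soft reset above (an assumption-laden reading,
+ * not a spec): usecs_to_jiffies(30) rounds up to at least one jiffy at
+ * typical HZ values, and the max() keeps the deadline from being zero,
+ * so the RST_FO_FR poll spins with cpu_relax() for roughly one tick
+ * before declaring -ETIMEDOUT.
+ */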
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ netif_info(qdev, probe, qdev->ndev,
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+ "XG Roll = %d, XG Rev = %d.\n",
+ qdev->func,
+ qdev->port,
+ qdev->chip_rev_id & 0x0000000f,
+ qdev->chip_rev_id >> 4 & 0x0000000f,
+ qdev->chip_rev_id >> 8 & 0x0000000f,
+ qdev->chip_rev_id >> 12 & 0x0000000f);
+ netif_info(qdev, probe, qdev->ndev,
+ "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_wol(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 wol = MB_WOL_DISABLE;
+
+ /* The CAM is still intact after a reset, but if we
+ * are doing WOL, then we may need to program the
+ * routing regs. We would also need to issue the mailbox
+ * commands to instruct the MPI what to do per the ethtool
+ * settings.
+ */
+
+ if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+ WAKE_MCAST | WAKE_BCAST)) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
+ qdev->wol);
+ return -EINVAL;
+ }
+
+ if (qdev->wol & WAKE_MAGIC) {
+ status = ql_mb_wol_set_magic(qdev, 1);
+ if (status) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
+ return status;
+ } else
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
+
+ wol |= MB_WOL_MAGIC_PKT;
+ }
+
+ if (qdev->wol) {
+ wol |= MB_WOL_MODE_ON;
+ status = ql_mb_wol_mode(qdev, wol);
+ netif_err(qdev, drv, qdev->ndev,
+ "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Successfully set" : "Failed",
+ wol, qdev->ndev->name);
+ }
+
+ return status;
+}
+
+static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
+{
+ /* Don't kill the reset worker thread if we
+ * are in the process of recovery.
+ */
+ if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+ cancel_delayed_work_sync(&qdev->asic_reset_work);
+ cancel_delayed_work_sync(&qdev->mpi_reset_work);
+ cancel_delayed_work_sync(&qdev->mpi_work);
+ cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
+ cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+
+ ql_link_off(qdev);
+
+ ql_cancel_all_work_sync(qdev);
+
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ napi_disable(&qdev->rx_ring[i].napi);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+
+ ql_disable_interrupts(qdev);
+
+ ql_tx_ring_clean(qdev);
+
+ /* Call netif_napi_del() from common point.
+ */
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ netif_napi_del(&qdev->rx_ring[i].napi);
+
+ status = ql_adapter_reset(qdev);
+ if (status)
+ netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+ qdev->func);
+ ql_free_rx_buffers(qdev);
+
+ return status;
+}
+
+static int ql_adapter_up(struct ql_adapter *qdev)
+{
+ int err = 0;
+
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
+ goto err_init;
+ }
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ ql_alloc_rx_buffers(qdev);
+ /* If the port is initialized and the
+ * link is up, then turn on the carrier.
+ */
+ if ((ql_read32(qdev, STS) & qdev->port_init) &&
+ (ql_read32(qdev, STS) & qdev->port_link_up))
+ ql_link_on(qdev);
+ /* Restore rx mode. */
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ qlge_set_multicast_list(qdev->ndev);
+
+ /* Restore vlan setting. */
+ qlge_restore_vlan(qdev);
+
+ ql_enable_interrupts(qdev);
+ ql_enable_all_completion_interrupts(qdev);
+ netif_tx_start_all_queues(qdev->ndev);
+
+ return 0;
+err_init:
+ ql_adapter_reset(qdev);
+ return err;
+}
+
+static void ql_release_adapter_resources(struct ql_adapter *qdev)
+{
+ ql_free_mem_resources(qdev);
+ ql_free_irq(qdev);
+}
+
+static int ql_get_adapter_resources(struct ql_adapter *qdev)
+{
+ int status = 0;
+
+ if (ql_alloc_mem_resources(qdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ status = ql_request_irq(qdev);
+ return status;
+}
+
+static int qlge_close(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ /* If we hit the pci_channel_io_perm_failure
+ * condition, then we already
+ * brought the adapter down.
+ */
+ if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
+ clear_bit(QL_EEH_FATAL, &qdev->flags);
+ return 0;
+ }
+
+ /*
+ * Wait for device to recover from a reset.
+ * (Rarely happens, but possible.)
+ */
+ while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+ msleep(1);
+ ql_adapter_down(qdev);
+ ql_release_adapter_resources(qdev);
+ return 0;
+}
+
+static int ql_configure_rings(struct ql_adapter *qdev)
+{
+ int i;
+ struct rx_ring *rx_ring;
+ struct tx_ring *tx_ring;
+ int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+ unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+ LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+ qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+ /* In a perfect world we have one RSS ring for each CPU
+ * and each has its own vector. To do that we ask for
+ * cpu_cnt vectors. ql_enable_msix() will adjust the
+ * vector count to what we actually get. We then
+ * allocate an RSS ring for each.
+ * Essentially, we are doing min(cpu_count, msix_vector_count).
+ */
+ qdev->intr_count = cpu_cnt;
+ ql_enable_msix(qdev);
+ /* Adjust the RSS ring count to the actual vector count. */
+ qdev->rss_ring_count = qdev->intr_count;
+ qdev->tx_ring_count = cpu_cnt;
+ qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
+
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ tx_ring = &qdev->tx_ring[i];
+ memset((void *)tx_ring, 0, sizeof(*tx_ring));
+ tx_ring->qdev = qdev;
+ tx_ring->wq_id = i;
+ tx_ring->wq_len = qdev->tx_ring_size;
+ tx_ring->wq_size =
+ tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+
+ /*
+ * The completion queue IDs for the tx rings start
+ * immediately after the rss rings.
+ */
+ tx_ring->cq_id = qdev->rss_ring_count + i;
+ }
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ memset((void *)rx_ring, 0, sizeof(*rx_ring));
+ rx_ring->qdev = qdev;
+ rx_ring->cq_id = i;
+ rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
+ if (i < qdev->rss_ring_count) {
+ /*
+ * Inbound (RSS) queues.
+ */
+ rx_ring->cq_len = qdev->rx_ring_size;
+ rx_ring->cq_size =
+ rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+ rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+ rx_ring->lbq_size =
+ rx_ring->lbq_len * sizeof(__le64);
+ rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+ rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+ rx_ring->sbq_size =
+ rx_ring->sbq_len * sizeof(__le64);
+ rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
+ rx_ring->type = RX_Q;
+ } else {
+ /*
+ * Outbound queue handles outbound completions only.
+ */
+ /* outbound cq is same size as tx_ring it services. */
+ rx_ring->cq_len = qdev->tx_ring_size;
+ rx_ring->cq_size =
+ rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+ rx_ring->lbq_len = 0;
+ rx_ring->lbq_size = 0;
+ rx_ring->lbq_buf_size = 0;
+ rx_ring->sbq_len = 0;
+ rx_ring->sbq_size = 0;
+ rx_ring->sbq_buf_size = 0;
+ rx_ring->type = TX_Q;
+ }
+ }
+ return 0;
+}
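+
+/* Worked layout (a sketch assuming a 4-CPU box that is granted all 4
+ * MSI-X vectors): rss_ring_count = 4, tx_ring_count = 4 and
+ * rx_ring_count = 8, so the completion queues come out as:
+ *
+ *	cq_id 0..3 -> inbound RSS rings (type RX_Q)
+ *	cq_id 4..7 -> outbound TX completion rings (type TX_Q)
+ */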
+
+static int qlge_open(struct net_device *ndev)
+{
+ int err = 0;
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ err = ql_adapter_reset(qdev);
+ if (err)
+ return err;
+
+ err = ql_configure_rings(qdev);
+ if (err)
+ return err;
+
+ err = ql_get_adapter_resources(qdev);
+ if (err)
+ goto error_up;
+
+ err = ql_adapter_up(qdev);
+ if (err)
+ goto error_up;
+
+ return err;
+
+error_up:
+ ql_release_adapter_resources(qdev);
+ return err;
+}
+
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+ struct rx_ring *rx_ring;
+ int i, status;
+ u32 lbq_buf_len;
+
+ /* Wait for an outstanding reset to complete. */
+ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ int i = 3;
+ while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Waiting for adapter UP...\n");
+ ssleep(1);
+ }
+
+ if (!i) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for adapter UP\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ status = ql_adapter_down(qdev);
+ if (status)
+ goto error;
+
+ /* Get the new rx buffer size. */
+ lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+ LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+ qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+ for (i = 0; i < qdev->rss_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ /* Set the new size. */
+ rx_ring->lbq_buf_size = lbq_buf_len;
+ }
+
+ status = ql_adapter_up(qdev);
+ if (status)
+ goto error;
+
+ return status;
+error:
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device.\n");
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ dev_close(qdev->ndev);
+ return status;
+}
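+
+/* Buffer-size sketch: with the standard 1500-byte MTU, lbq_buf_len is
+ * LARGE_BUFFER_MIN_SIZE; a jumbo 9000-byte MTU selects
+ * LARGE_BUFFER_MAX_SIZE, and get_order() converts that length into a
+ * page-allocation order for the large-buffer queue pages.
+ */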
+
+static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ if (ndev->mtu == 1500 && new_mtu == 9000) {
+ netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
+ } else if (ndev->mtu == 9000 && new_mtu == 1500) {
+ netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
+ } else
+ return -EINVAL;
+
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 3*HZ);
+
+ ndev->mtu = new_mtu;
+
+ if (!netif_running(qdev->ndev))
+ return 0;
+
+ status = ql_change_rx_buffers(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Changing MTU failed.\n");
+ }
+
+ return status;
+}
+
+static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct rx_ring *rx_ring = &qdev->rx_ring[0];
+ struct tx_ring *tx_ring = &qdev->tx_ring[0];
+ unsigned long pkts, mcast, dropped, errors, bytes;
+ int i;
+
+ /* Get RX stats. */
+ pkts = mcast = dropped = errors = bytes = 0;
+ for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
+ pkts += rx_ring->rx_packets;
+ bytes += rx_ring->rx_bytes;
+ dropped += rx_ring->rx_dropped;
+ errors += rx_ring->rx_errors;
+ mcast += rx_ring->rx_multicast;
+ }
+ ndev->stats.rx_packets = pkts;
+ ndev->stats.rx_bytes = bytes;
+ ndev->stats.rx_dropped = dropped;
+ ndev->stats.rx_errors = errors;
+ ndev->stats.multicast = mcast;
+
+ /* Get TX stats. */
+ pkts = errors = bytes = 0;
+ for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
+ pkts += tx_ring->tx_packets;
+ bytes += tx_ring->tx_bytes;
+ errors += tx_ring->tx_errors;
+ }
+ ndev->stats.tx_packets = pkts;
+ ndev->stats.tx_bytes = bytes;
+ ndev->stats.tx_errors = errors;
+ return &ndev->stats;
+}
+
+static void qlge_set_multicast_list(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ int i, status;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return;
+ /*
+ * Set or clear promiscuous mode if a
+ * transition is taking place.
+ */
+ if (ndev->flags & IFF_PROMISC) {
+ if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set promiscuous mode.\n");
+ } else {
+ set_bit(QL_PROMISCUOUS, &qdev->flags);
+ }
+ }
+ } else {
+ if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear promiscuous mode.\n");
+ } else {
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ }
+ }
+ }
+
+ /*
+ * Set or clear all multicast mode if a
+ * transition is taking place.
+ */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
+ if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set all-multi mode.\n");
+ } else {
+ set_bit(QL_ALLMULTI, &qdev->flags);
+ }
+ }
+ } else {
+ if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear all-multi mode.\n");
+ } else {
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ }
+ }
+ }
+
+ if (!netdev_mc_empty(ndev)) {
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ goto exit;
+ i = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+ MAC_ADDR_TYPE_MULTI_MAC, i)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to load multicast address.\n");
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ goto exit;
+ }
+ i++;
+ }
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set multicast match mode.\n");
+ } else {
+ set_bit(QL_ALLMULTI, &qdev->flags);
+ }
+ }
+exit:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int status;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ /* Update local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ if (status)
+ netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ ql_queue_asic_error(qdev);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, asic_reset_work.work);
+ int status;
+ rtnl_lock();
+ status = ql_adapter_down(qdev);
+ if (status)
+ goto error;
+
+ status = ql_adapter_up(qdev);
+ if (status)
+ goto error;
+
+ /* Restore rx mode. */
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ qlge_set_multicast_list(qdev->ndev);
+
+ rtnl_unlock();
+ return;
+error:
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ dev_close(qdev->ndev);
+ rtnl_unlock();
+}
+
+static const struct nic_operations qla8012_nic_ops = {
+ .get_flash = ql_get_8012_flash_params,
+ .port_initialize = ql_8012_port_initialize,
+};
+
+static const struct nic_operations qla8000_nic_ops = {
+ .get_flash = ql_get_8000_flash_params,
+ .port_initialize = ql_8000_port_initialize,
+};
+
+/* Find the pcie function number for the other NIC
+ * on this chip. Since both NIC functions share a
+ * common firmware we have the lowest enabled function
+ * do any common work. Examples would be resetting
+ * after a fatal firmware error, or doing a firmware
+ * coredump.
+ */
+static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 temp;
+ u32 nic_func1, nic_func2;
+
+ status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
+ &temp);
+ if (status)
+ return status;
+
+ nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
+ MPI_TEST_NIC_FUNC_MASK);
+ nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
+ MPI_TEST_NIC_FUNC_MASK);
+
+ if (qdev->func == nic_func1)
+ qdev->alt_func = nic_func2;
+ else if (qdev->func == nic_func2)
+ qdev->alt_func = nic_func1;
+ else
+ status = -EIO;
+
+ return status;
+}
+
+static int ql_get_board_info(struct ql_adapter *qdev)
+{
+ int status;
+ qdev->func =
+ (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+ if (qdev->func > 3)
+ return -EIO;
+
+ status = ql_get_alt_pcie_func(qdev);
+ if (status)
+ return status;
+
+ qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
+ if (qdev->port) {
+ qdev->xg_sem_mask = SEM_XGMAC1_MASK;
+ qdev->port_link_up = STS_PL1;
+ qdev->port_init = STS_PI1;
+ qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
+ qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
+ } else {
+ qdev->xg_sem_mask = SEM_XGMAC0_MASK;
+ qdev->port_link_up = STS_PL0;
+ qdev->port_init = STS_PI0;
+ qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
+ qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
+ }
+ qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+ qdev->device_id = qdev->pdev->device;
+ if (qdev->device_id == QLGE_DEVICE_ID_8012)
+ qdev->nic_ops = &qla8012_nic_ops;
+ else if (qdev->device_id == QLGE_DEVICE_ID_8000)
+ qdev->nic_ops = &qla8000_nic_ops;
+ return status;
+}
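+
+/* Example (a sketch): if the MPI config word reports nic_func1 == 0 and
+ * nic_func2 == 2, then a qdev->func of 2 yields alt_func = 0, and since
+ * 2 > 0 this function takes port 1, selecting the XGMAC1 semaphore and
+ * the FUNC2 mailbox addresses above.
+ */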
+
+static void ql_release_all(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (qdev->workqueue) {
+ destroy_workqueue(qdev->workqueue);
+ qdev->workqueue = NULL;
+ }
+
+ if (qdev->reg_base)
+ iounmap(qdev->reg_base);
+ if (qdev->doorbell_area)
+ iounmap(qdev->doorbell_area);
+ vfree(qdev->mpi_coredump);
+ pci_release_regions(pdev);
+}
+
+static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
+ int cards_found)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err = 0;
+
+ memset((void *)qdev, 0, sizeof(*qdev));
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI device enable failed.\n");
+ return err;
+ }
+
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ pci_set_drvdata(pdev, ndev);
+
+ /* Set PCIe read request size */
+ err = pcie_set_readrq(pdev, 4096);
+ if (err) {
+ dev_err(&pdev->dev, "Set readrq failed.\n");
+ goto err_out1;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "PCI region request failed.\n");
+ goto err_out1;
+ }
+
+ pci_set_master(pdev);
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ set_bit(QL_DMA64, &qdev->flags);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto err_out2;
+ }
+
+ /* Set PCIe reset type for EEH to fundamental. */
+ pdev->needs_freset = 1;
+ pci_save_state(pdev);
+ qdev->reg_base =
+ ioremap_nocache(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!qdev->reg_base) {
+ dev_err(&pdev->dev, "Register mapping failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ qdev->doorbell_area_size = pci_resource_len(pdev, 3);
+ qdev->doorbell_area =
+ ioremap_nocache(pci_resource_start(pdev, 3),
+ pci_resource_len(pdev, 3));
+ if (!qdev->doorbell_area) {
+ dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ err = ql_get_board_info(qdev);
+ if (err) {
+ dev_err(&pdev->dev, "Register access failed.\n");
+ err = -EIO;
+ goto err_out2;
+ }
+ qdev->msg_enable = netif_msg_init(debug, default_msg);
+ spin_lock_init(&qdev->hw_lock);
+ spin_lock_init(&qdev->stats_lock);
+
+ if (qlge_mpi_coredump) {
+ qdev->mpi_coredump =
+ vmalloc(sizeof(struct ql_mpi_coredump));
+ if (qdev->mpi_coredump == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ if (qlge_force_coredump)
+ set_bit(QL_FRC_COREDUMP, &qdev->flags);
+ }
+ /* make sure the EEPROM is good */
+ err = qdev->nic_ops->get_flash(qdev);
+ if (err) {
+ dev_err(&pdev->dev, "Invalid FLASH.\n");
+ goto err_out2;
+ }
+
+ /* Keep local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+ /* Set up the default ring sizes. */
+ qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
+ qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+ /* Set up the coalescing parameters. */
+ qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
+ qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
+ qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+ qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+
+ /*
+ * Set up the operating parameters.
+ */
+ qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
+ INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
+ INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+ INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+ INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
+ init_completion(&qdev->ide_completion);
+ mutex_init(&qdev->mpi_mutex);
+
+ if (!cards_found) {
+ dev_info(&pdev->dev, "%s\n", DRV_STRING);
+ dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
+ DRV_NAME, DRV_VERSION);
+ }
+ return 0;
+err_out2:
+ ql_release_all(pdev);
+err_out1:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static const struct net_device_ops qlge_netdev_ops = {
+ .ndo_open = qlge_open,
+ .ndo_stop = qlge_close,
+ .ndo_start_xmit = qlge_send,
+ .ndo_change_mtu = qlge_change_mtu,
+ .ndo_get_stats = qlge_get_stats,
+ .ndo_set_rx_mode = qlge_set_multicast_list,
+ .ndo_set_mac_address = qlge_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = qlge_tx_timeout,
+ .ndo_fix_features = qlge_fix_features,
+ .ndo_set_features = qlge_set_features,
+ .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
+};
+
+static void ql_timer(unsigned long data)
+{
+ struct ql_adapter *qdev = (struct ql_adapter *)data;
+ u32 var = 0;
+
+ var = ql_read32(qdev, STS);
+ if (pci_channel_offline(qdev->pdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
+ return;
+ }
+
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
+}
+
+static int qlge_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
+{
+ struct net_device *ndev = NULL;
+ struct ql_adapter *qdev = NULL;
+ static int cards_found = 0;
+ int err = 0;
+
+ ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+ min(MAX_CPUS, netif_get_num_default_rss_queues()));
+ if (!ndev)
+ return -ENOMEM;
+
+ err = ql_init_device(pdev, ndev, cards_found);
+ if (err < 0) {
+ free_netdev(ndev);
+ return err;
+ }
+
+ qdev = netdev_priv(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features;
+ ndev->vlan_features = ndev->hw_features;
+ /* vlan gets same features (except vlan filter) */
+ ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
+ if (test_bit(QL_DMA64, &qdev->flags))
+ ndev->features |= NETIF_F_HIGHDMA;
+
+ /*
+ * Set up net_device structure.
+ */
+ ndev->tx_queue_len = qdev->tx_ring_size;
+ ndev->irq = pdev->irq;
+
+ ndev->netdev_ops = &qlge_netdev_ops;
+ ndev->ethtool_ops = &qlge_ethtool_ops;
+ ndev->watchdog_timeo = 10 * HZ;
+
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "net device registration failed.\n");
+ ql_release_all(pdev);
+ pci_disable_device(pdev);
+ free_netdev(ndev);
+ return err;
+ }
+ /* Start up the timer to trigger EEH if
+ * the bus goes dead
+ */
+ init_timer_deferrable(&qdev->timer);
+ qdev->timer.data = (unsigned long)qdev;
+ qdev->timer.function = ql_timer;
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
+ ql_link_off(qdev);
+ ql_display_dev_info(ndev);
+ atomic_set(&qdev->lb_count, 0);
+ cards_found++;
+ return 0;
+}
+
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+{
+ return qlge_send(skb, ndev);
+}
+
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+ return ql_clean_inbound_rx_ring(rx_ring, budget);
+}
+
+static void qlge_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ del_timer_sync(&qdev->timer);
+ ql_cancel_all_work_sync(qdev);
+ unregister_netdev(ndev);
+ ql_release_all(pdev);
+ pci_disable_device(pdev);
+ free_netdev(ndev);
+}
+
+/* Clean up resources without touching hardware. */
+static void ql_eeh_close(struct net_device *ndev)
+{
+ int i;
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (netif_carrier_ok(ndev)) {
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+ }
+
+ /* Disabling the timer */
+ del_timer_sync(&qdev->timer);
+ ql_cancel_all_work_sync(qdev);
+
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ netif_napi_del(&qdev->rx_ring[i].napi);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ ql_tx_ring_clean(qdev);
+ ql_free_rx_buffers(qdev);
+ ql_release_adapter_resources(qdev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ netif_device_detach(ndev);
+ if (netif_running(ndev))
+ ql_eeh_close(ndev);
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_err(&pdev->dev,
+ "%s: pci_channel_io_perm_failure.\n", __func__);
+ ql_eeh_close(ndev);
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the qlge_probe() routine.
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ pdev->error_state = pci_channel_io_normal;
+
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ if (ql_adapter_reset(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err = 0;
+
+ if (netif_running(ndev)) {
+ err = qlge_open(ndev);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device initialization failed after reset.\n");
+ return;
+ }
+ } else {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device was not running prior to EEH.\n");
+ }
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
+ netif_device_attach(ndev);
+}
+
+static const struct pci_error_handlers qlge_err_handler = {
+ .error_detected = qlge_io_error_detected,
+ .slot_reset = qlge_io_slot_reset,
+ .resume = qlge_io_resume,
+};
+
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err;
+
+ netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
+
+ if (netif_running(ndev)) {
+ err = ql_adapter_down(qdev);
+ if (err)
+ return err;
+ }
+
+ ql_wol(qdev);
+ err = pci_save_state(pdev);
+ if (err)
+ return err;
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int qlge_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (netif_running(ndev)) {
+ err = ql_adapter_up(qdev);
+ if (err)
+ return err;
+ }
+
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
+ netif_device_attach(ndev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+ qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+ .name = DRV_NAME,
+ .id_table = qlge_pci_tbl,
+ .probe = qlge_probe,
+ .remove = qlge_remove,
+#ifdef CONFIG_PM
+ .suspend = qlge_suspend,
+ .resume = qlge_resume,
+#endif
+ .shutdown = qlge_shutdown,
+ .err_handler = &qlge_err_handler
+};
+
+module_pci_driver(qlge_driver);
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
new file mode 100644
index 000000000..7ad146080
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -0,0 +1,1284 @@
+#include "qlge.h"
+
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read32(qdev, CSR);
+ if (!(tmp & CSR_RP))
+ return -EIO;
+
+ ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Pause the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RP)
+ break;
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Reset the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RR) {
+ ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ break;
+ }
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* set up for reg read */
+ ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* get the data */
+ *data = ql_read32(qdev, PROC_DATA);
+exit:
+ return status;
+}
+
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* write the data to the data reg */
+ ql_write32(qdev, PROC_DATA, data);
+ /* trigger the write */
+ ql_write32(qdev, PROC_ADDR, reg);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+exit:
+ return status;
+}
+
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+ return status;
+}
+
+/* Determine if we are in charge of the firmware. We
+ * are if we are the lower of the 2 NIC PCIe functions,
+ * or if we are the higher function and the lower
+ * function is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+ u32 temp;
+
+ /* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ if (qdev->func < qdev->alt_func)
+ return 1;
+
+ /* If we are the higher of the 2 NIC functions
+ * on the chip and the lower function is not
+ * enabled, then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ temp = ql_read32(qdev, STS);
+ if (!(temp & (1 << (8 + qdev->alt_func))))
+ return 1;
+
+ return 0;
+}
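+
+/* Example (a sketch): with qdev->func == 1 and qdev->alt_func == 0, the
+ * enable bit tested above is STS bit (8 + 0) == bit 8; if that bit is
+ * clear the lower function is disabled and this (higher) function owns
+ * the firmware.
+ */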
+
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int i, status;
+
+ status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ if (status)
+ return -EBUSY;
+ for (i = 0; i < mbcp->out_count; i++) {
+ status =
+ ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
+ &mbcp->mbox_out[i]);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
+ break;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+}
+
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
+ */
+static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+{
+ int count = 100;
+ u32 value;
+
+ do {
+ value = ql_read32(qdev, STS);
+ if (value & STS_PI)
+ return 0;
+ mdelay(UDELAY_DELAY); /* 100ms */
+ } while (--count);
+ return -ETIMEDOUT;
+}
+
+/* Execute a single mailbox command.
+ * Caller must hold PROC_ADDR semaphore.
+ */
+static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int i, status;
+
+ /*
+ * Make sure there's nothing pending.
+ * This shouldn't happen.
+ */
+ if (ql_read32(qdev, CSR) & CSR_HRI)
+ return -EIO;
+
+ status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ if (status)
+ return status;
+
+ /*
+ * Fill the outbound mailboxes.
+ */
+ for (i = 0; i < mbcp->in_count; i++) {
+ status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
+ mbcp->mbox_in[i]);
+ if (status)
+ goto end;
+ }
+ /*
+ * Wake up the MPI firmware.
+ */
+ ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+end:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+ return status;
+}
+
+/* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode. We wake up a worker
+ * to handle processing this, since a mailbox command
+ * will need to be sent to ACK the request.
+ */
+static int ql_idc_req_aen(struct ql_adapter *qdev)
+{
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+
+ netif_err(qdev, drv, qdev->ndev, "Enter!\n");
+ /* Get the status data and start up a thread to
+ * handle the request.
+ */
+ mbcp->out_count = 4;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
+ ql_queue_asic_error(qdev);
+ } else {
+ /* Begin polled mode early so
+ * we don't get another interrupt
+ * when we leave mpi_worker.
+ */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
+ }
+ return status;
+}
+
+/* Process an inter-device event completion.
+ * If good, signal the caller's completion.
+ */
+static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+{
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ mbcp->out_count = 4;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting RISC!\n");
+ ql_queue_fw_error(qdev);
+ } else
+ /* Wake up the sleeping mpi_idc_work thread that is
+ * waiting for this event.
+ */
+ complete(&qdev->ide_completion);
+
+ return status;
+}
+
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ mbcp->out_count = 2;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "%s: Could not get mailbox status.\n", __func__);
+ return;
+ }
+
+ qdev->link_status = mbcp->mbox_out[1];
+ netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
+
+ /* If we're coming back from an IDC event
+ * then set up the CAM and frame routing.
+ */
+ if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
+ status = ql_cam_route_initialize(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
+ return;
+ } else
+ clear_bit(QL_CAM_RT_SET, &qdev->flags);
+ }
+
+ /* Queue up a worker to check the frame
+ * size information, and fix it if it's not
+ * to our liking.
+ */
+ if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
+ set_bit(QL_PORT_CFG, &qdev->flags);
+ /* Begin polled mode early so
+ * we don't get another interrupt
+ * when we leave mpi_worker dpc.
+ */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 0);
+ }
+
+ ql_link_on(qdev);
+}
+
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 3;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
+
+ ql_link_off(qdev);
+}
+
+static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 5;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
+ else
+ netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
+
+ return status;
+}
+
+static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 1;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
+ else
+ netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
+
+ return status;
+}
+
+static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 6;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
+ else {
+ int i;
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
+ for (i = 0; i < mbcp->out_count; i++)
+ netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
+
+ }
+
+ return status;
+}
+
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 2;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
+ } else {
+ netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
+ qdev->fw_rev_id = mbcp->mbox_out[1];
+ status = ql_cam_route_initialize(qdev);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
+ }
+}
+
+/* Process an async event and clear it unless it's an
+ * error condition.
+ * This can get called iteratively from the mpi_work thread
+ * when events arrive via an interrupt.
+ * It also gets called when a mailbox command is polling for
+ * its completion.
+ */
+static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ int orig_count = mbcp->out_count;
+
+ /* Just get mailbox zero for now. */
+ mbcp->out_count = 1;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
+ ql_queue_asic_error(qdev);
+ goto end;
+ }
+
+ switch (mbcp->mbox_out[0]) {
+
+ /* This case is only active when we arrive here
+ * as a result of issuing a mailbox command to
+ * the firmware.
+ */
+ case MB_CMD_STS_INTRMDT:
+ case MB_CMD_STS_GOOD:
+ case MB_CMD_STS_INVLD_CMD:
+ case MB_CMD_STS_XFC_ERR:
+ case MB_CMD_STS_CSUM_ERR:
+ case MB_CMD_STS_ERR:
+ case MB_CMD_STS_PARAM_ERR:
+ /* We can only get mailbox status if we're polling from an
+ * unfinished command. Get the rest of the status data and
+ * return back to the caller.
+ * We only end up here when we're polling for a mailbox
+ * command completion.
+ */
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ return status;
+
+ /* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode.
+ */
+ case AEN_IDC_REQ:
+ status = ql_idc_req_aen(qdev);
+ break;
+
+ /* Process an inbound IDC event.
+ * This will happen when we're trying to
+ * change tx/rx max frame size, change pause
+ * parameters or loopback mode.
+ */
+ case AEN_IDC_CMPLT:
+ case AEN_IDC_EXT:
+ status = ql_idc_cmplt_aen(qdev);
+ break;
+
+ case AEN_LINK_UP:
+ ql_link_up(qdev, mbcp);
+ break;
+
+ case AEN_LINK_DOWN:
+ ql_link_down(qdev, mbcp);
+ break;
+
+ case AEN_FW_INIT_DONE:
+ /* If we're in the process of executing the firmware,
+ * then convert the status to normal mailbox status.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
+ return status;
+ }
+ ql_init_fw_done(qdev, mbcp);
+ break;
+
+ case AEN_AEN_SFP_IN:
+ ql_sfp_in(qdev, mbcp);
+ break;
+
+ case AEN_AEN_SFP_OUT:
+ ql_sfp_out(qdev, mbcp);
+ break;
+
+ /* This event can arrive at boot time or after an
+ * MPI reset if the firmware failed to initialize.
+ */
+ case AEN_FW_INIT_FAIL:
+ /* If we're in the process of executing the firmware,
+ * then convert the status to normal mailbox status.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ mbcp->mbox_out[0] = MB_CMD_STS_ERR;
+ return status;
+ }
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware initialization failed.\n");
+ status = -EIO;
+ ql_queue_fw_error(qdev);
+ break;
+
+ case AEN_SYS_ERR:
+ netif_err(qdev, drv, qdev->ndev, "System Error.\n");
+ ql_queue_fw_error(qdev);
+ status = -EIO;
+ break;
+
+ case AEN_AEN_LOST:
+ ql_aen_lost(qdev, mbcp);
+ break;
+
+ case AEN_DCBX_CHG:
+ /* Need to support AEN 8110 */
+ break;
+ default:
+ netif_err(qdev, drv, qdev->ndev,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+ /* Clear the MPI firmware status. */
+ }
+end:
+ ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ /* Restore the original mailbox count to
+ * what the caller asked for. This can get
+ * changed when a mailbox command is waiting
+ * for a response and an AEN arrives and
+ * is handled.
+ */
+ mbcp->out_count = orig_count;
+ return status;
+}
+
+/* Execute a single mailbox command.
+ * mbcp points to a mbox_params structure; each element
+ * of its mbox_in[] array holds the value for its
+ * respective mailbox register.
+ */
+static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ unsigned long count;
+
+ mutex_lock(&qdev->mpi_mutex);
+
+ /* Begin polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+ /* Load the mailbox registers and wake up MPI RISC. */
+ status = ql_exec_mb_cmd(qdev, mbcp);
+ if (status)
+ goto end;
+
+ /* If we're generating a system error, then there's nothing
+ * to wait for.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
+ goto end;
+
+ /* Wait for the command to complete. We loop
+ * here because some AEN might arrive while
+ * we're waiting for the mailbox command to
+ * complete. If more than 5 seconds expire we can
+ * assume something is wrong. */
+ count = jiffies + HZ * MAILBOX_TIMEOUT;
+ do {
+ /* Wait for the interrupt to come in. */
+ status = ql_wait_mbx_cmd_cmplt(qdev);
+ if (status)
+ continue;
+
+ /* Process the event. If it's an AEN, it
+ * will be handled in-line or a worker
+ * will be spawned. If it's our completion
+ * we will catch it below.
+ */
+ status = ql_mpi_handler(qdev, mbcp);
+ if (status)
+ goto end;
+
+ /* It's either the completion of our mailbox
+ * command or an AEN. If it's our completion
+ * then get out.
+ */
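+ /* The command status lives in bits 15:12 of mailbox 0,
+ * hence the 0x0000f000 mask below.
+ */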
+ if (((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_GOOD) ||
+ ((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_INTRMDT))
+ goto done;
+ } while (time_before(jiffies, count));
+
+ netif_err(qdev, drv, qdev->ndev,
+ "Timed out waiting for mailbox complete.\n");
+ status = -ETIMEDOUT;
+ goto end;
+
+done:
+
+ /* Now we can clear the interrupt condition
+ * and look at our status.
+ */
+ ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+ if (((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_GOOD) &&
+ ((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_INTRMDT)) {
+ status = -EIO;
+ }
+end:
+ /* End polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
+ return status;
+}
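+
+/* Usage sketch for the ql_mb_* wrappers below (an illustrative
+ * example mirroring ql_mb_about_fw, not part of the original
+ * driver):
+ *
+ * struct mbox_params mbc;
+ *
+ * memset(&mbc, 0, sizeof(mbc));
+ * mbc.in_count = 1;
+ * mbc.out_count = 3;
+ * mbc.mbox_in[0] = MB_CMD_ABOUT_FW;
+ * status = ql_mailbox_command(qdev, &mbc);
+ *
+ * On success, mbc.mbox_out[0] holds the command status and
+ * mbc.mbox_out[1..out_count-1] hold any returned data.
+ */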
+
+/* Get MPI firmware version. This will be used for
+ * driver banner and for ethtool info.
+ * Returns zero on success.
+ */
+int ql_mb_about_fw(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 3;
+
+ mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed about firmware command\n");
+ status = -EIO;
+ }
+
+ /* Store the firmware version */
+ qdev->fw_rev_id = mbcp->mbox_out[1];
+
+ return status;
+}
+
+/* Get functional state for MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_get_fw_state(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Firmware State.\n");
+ status = -EIO;
+ }
+
+ /* If bit zero is set in mbx 1 then the firmware is
+ * running, but not initialized. This should never
+ * happen.
+ */
+ if (mbcp->mbox_out[1] & 1) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware waiting for initialization.\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+/* Send an ACK mailbox command to the firmware to
+ * let it continue with the change.
+ */
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 5;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
+ mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
+ mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
+ mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
+ mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Set link settings and maximum frame size
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 3;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
+ mbcp->mbox_in[1] = qdev->link_config;
+ mbcp->mbox_in[2] = qdev->max_frame_size;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Port Config sent, wait for IDC.\n");
+ } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Set Port Configuration.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
+{
+ int status = 0;
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 9;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+ mbcp->mbox_in[1] = LSW(addr);
+ mbcp->mbox_in[2] = MSW(req_dma);
+ mbcp->mbox_in[3] = LSW(req_dma);
+ mbcp->mbox_in[4] = MSW(size);
+ mbcp->mbox_in[5] = LSW(size);
+ mbcp->mbox_in[6] = MSW(MSD(req_dma));
+ mbcp->mbox_in[7] = LSW(MSD(req_dma));
+ mbcp->mbox_in[8] = MSW(addr);
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
+ status = -EIO;
+ }
+ return status;
+}
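+
+/* Worked example (illustrative only, assuming the usual LSW/MSW
+ * 16-bit and MSD upper-32-bit word-split macros from qlge.h):
+ * for req_dma = 0x000000123456789A the layout above becomes
+ *
+ * mbox_in[2] = MSW(req_dma)      = 0x3456  (bits 31:16)
+ * mbox_in[3] = LSW(req_dma)      = 0x789A  (bits 15:0)
+ * mbox_in[6] = MSW(MSD(req_dma)) = 0x0000  (bits 63:48)
+ * mbox_in[7] = LSW(MSD(req_dma)) = 0x0012  (bits 47:32)
+ *
+ * so the full 64-bit DMA address is spread across four 16-bit
+ * mailbox words.
+ */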
+
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
+{
+ int status;
+ char *my_buf;
+ dma_addr_t buf_dma;
+
+ my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+ &buf_dma);
+ if (!my_buf)
+ return -ENOMEM;
+
+ status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ if (!status)
+ memcpy(buf, my_buf, word_count * sizeof(u32));
+
+ pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+ buf_dma);
+ return status;
+}
+
+/* Get link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 3;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Port Configuration.\n");
+ status = -EIO;
+ } else {
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Passed Get Port Configuration.\n");
+ qdev->link_config = mbcp->mbox_out[1];
+ qdev->max_frame_size = mbcp->mbox_out[2];
+ }
+ return status;
+}
+
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+ mbcp->mbox_in[1] = wol;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+ u8 *addr = qdev->ndev->dev_addr;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 8;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
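+ /* When enabling, the station MAC address is passed to the
+ * firmware one byte per mailbox register so it can match the
+ * magic packet; when disabling, a placeholder pattern is
+ * written instead.
+ */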
+ if (enable_wol) {
+ mbcp->mbox_in[1] = (u32)addr[0];
+ mbcp->mbox_in[2] = (u32)addr[1];
+ mbcp->mbox_in[3] = (u32)addr[2];
+ mbcp->mbox_in[4] = (u32)addr[3];
+ mbcp->mbox_in[5] = (u32)addr[4];
+ mbcp->mbox_in[6] = (u32)addr[5];
+ mbcp->mbox_in[7] = 0;
+ } else {
+ mbcp->mbox_in[1] = 0;
+ mbcp->mbox_in[2] = 1;
+ mbcp->mbox_in[3] = 1;
+ mbcp->mbox_in[4] = 1;
+ mbcp->mbox_in[5] = 1;
+ mbcp->mbox_in[6] = 1;
+ mbcp->mbox_in[7] = 0;
+ }
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL magic packet.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* IDC - Inter Device Communication...
+ * Some firmware commands require the consent of the
+ * adjacent FCoE function. This function waits for the OK,
+ * or a counter-request for a little more time.
+ * The firmware will complete the request if the other
+ * function doesn't respond.
+ */
+static int ql_idc_wait(struct ql_adapter *qdev)
+{
+ int status = -ETIMEDOUT;
+ long wait_time = 1 * HZ;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ do {
+ /* Wait here for the command to complete
+ * via the IDC process.
+ */
+ wait_time =
+ wait_for_completion_timeout(&qdev->ide_completion,
+ wait_time);
+ if (!wait_time) {
+ netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
+ break;
+ }
+ /* Now examine the response from the IDC process.
+ * We might have a good completion or a request for
+ * more wait time.
+ */
+ if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC Time Extension from function.\n");
+ wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
+ } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
+ netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
+ status = 0;
+ break;
+ } else {
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
+ status = -EIO;
+ break;
+ }
+ } while (wait_time);
+
+ return status;
+}
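+
+/* Illustrative IDC exchange (a sketch, not from the original
+ * driver): the driver issues e.g. ql_mb_set_port_cfg(), the
+ * firmware answers MB_CMD_STS_INTRMDT, and ql_idc_wait() then
+ * blocks until the peer function posts AEN_IDC_EXT (asking for
+ * more time, possibly several times) and finally AEN_IDC_CMPLT,
+ * or until the one second wait expires.
+ */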
+
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+ mbcp->mbox_in[1] = led_config;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to set LED Configuration.\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get LED Configuration.\n");
+ status = -EIO;
+ } else
+ qdev->led_config = mbcp->mbox_out[1];
+
+ return status;
+}
+
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2; /* command word + control word */
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
+ mbcp->mbox_in[1] = control;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
+ status = -EINVAL;
+ } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+ /* This indicates that the firmware is
+ * already in the state we are trying to
+ * change it to.
+ */
+ netif_err(qdev, drv, qdev->ndev,
+ "Command parameters make no change.\n");
+ }
+ return status;
+}
+
+/* Returns a negative error code or the mailbox command status. */
+static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+ *control = 0;
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2; /* status word + traffic control word */
+
+ mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
+ *control = mbcp->mbox_out[1];
+ return status;
+ }
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
+ status = -EINVAL;
+ } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get MPI traffic control.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+int ql_wait_fifo_empty(struct ql_adapter *qdev)
+{
+ int count = 5;
+ u32 mgmnt_fifo_empty;
+ u32 nic_fifo_empty;
+
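+ /* Poll both FIFOs up to six times, 100 ms apart. */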
+ do {
+ nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
+ ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
+ mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
+ if (nic_fifo_empty && mgmnt_fifo_empty)
+ return 0;
+ msleep(100);
+ } while (count-- > 0);
+ return -ETIMEDOUT;
+}
+
+/* API called in work thread context to set new TX/RX
+ * maximum frame size values to match MTU.
+ */
+static int ql_set_port_cfg(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_mb_set_port_cfg(qdev);
+ if (status)
+ return status;
+ status = ql_idc_wait(qdev);
+ return status;
+}
+
+/* The following routines are worker threads that process
+ * events that may sleep waiting for completion.
+ */
+
+/* This thread gets the maximum TX and RX frame size values
+ * from the firmware and, if necessary, changes them to match
+ * the MTU setting.
+ */
+void ql_mpi_port_cfg_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
+ int status;
+
+ status = ql_mb_get_port_cfg(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to get port config data.\n");
+ goto err;
+ }
+
+ if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
+ qdev->max_frame_size ==
+ CFG_DEFAULT_MAX_FRAME_SIZE)
+ goto end;
+
+ qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
+ qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
+ status = ql_set_port_cfg(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to set port config data.\n");
+ goto err;
+ }
+end:
+ clear_bit(QL_PORT_CFG, &qdev->flags);
+ return;
+err:
+ ql_queue_fw_error(qdev);
+ goto end;
+}
+
+/* Process an inter-device request. This is issued by
+ * the firmware in response to another function requesting
+ * a change to the port. We set a flag to indicate a change
+ * has been made and then send a mailbox command ACKing
+ * the change request.
+ */
+void ql_mpi_idc_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_idc_work.work);
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ u32 aen;
+ int timeout;
+
+ aen = mbcp->mbox_out[1] >> 16;
+ timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
+
+ switch (aen) {
+ default:
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Unhandled IDC action.\n");
+ break;
+ case MB_CMD_PORT_RESET:
+ case MB_CMD_STOP_FW:
+ ql_link_off(qdev);
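+ /* Fall through. */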
+ case MB_CMD_SET_PORT_CFG:
+ /* Signal the resulting link up AEN
+ * that the frame routing and MAC addr
+ * need to be set.
+ */
+ set_bit(QL_CAM_RT_SET, &qdev->flags);
+ /* Do ACK if required */
+ if (timeout) {
+ status = ql_mb_idc_ack(qdev);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
+ } else {
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
+ status = 0; /* success */
+ }
+ break;
+
+ /* These sub-commands, issued by another (FCoE)
+ * function, request an operation on the shared
+ * resource (the MPI environment). We currently
+ * don't issue these, so we just ACK the request.
+ */
+ case MB_CMD_IOP_RESTART_MPI:
+ case MB_CMD_IOP_PREP_LINK_DOWN:
+ /* Drop the link, reload the routing
+ * table when link comes up.
+ */
+ ql_link_off(qdev);
+ set_bit(QL_CAM_RT_SET, &qdev->flags);
+ /* Fall through. */
+ case MB_CMD_IOP_DVR_START:
+ case MB_CMD_IOP_FLASH_ACC:
+ case MB_CMD_IOP_CORE_DUMP_MPI:
+ case MB_CMD_IOP_PREP_UPDATE_MPI:
+ case MB_CMD_IOP_COMP_UPDATE_MPI:
+ case MB_CMD_IOP_NONE: /* an IDC without params */
+ /* Do ACK if required */
+ if (timeout) {
+ status = ql_mb_idc_ack(qdev);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
+ } else {
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
+ status = 0; /* success */
+ }
+ break;
+ }
+}
+
+void ql_mpi_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_work.work);
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int err = 0;
+
+ mutex_lock(&qdev->mpi_mutex);
+ /* Begin polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+ while (ql_read32(qdev, STS) & STS_PI) {
+ memset(mbcp, 0, sizeof(struct mbox_params));
+ mbcp->out_count = 1;
+ /* Don't continue if an async event
+ * did not complete properly.
+ */
+ err = ql_mpi_handler(qdev, mbcp);
+ if (err)
+ break;
+ }
+
+ /* End polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
+ ql_enable_completion_interrupt(qdev, 0);
+}
+
+void ql_mpi_reset_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_reset_work.work);
+ cancel_delayed_work_sync(&qdev->mpi_work);
+ cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+ cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ /* If we're not the dominant NIC function,
+ * then there is nothing to do.
+ */
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
+ netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
+ qdev->core_is_dumped = 1;
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_core_to_log, 5 * HZ);
+ }
+ ql_soft_reset_mpi_risc(qdev);
+}