author		José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-11 10:41:07 +0300
committer	José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-13 08:17:18 +0300
commit		e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree		d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/net/ethernet/qlogic
parent		f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The colliding hunks were dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/qlogic')
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/Kconfig | 16
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/Makefile | 2
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 2
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/Makefile | 4
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed.h | 499
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.c | 847
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.h | 139
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_dev.c | 1814
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 283
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5291
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_hw.c | 776
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_hw.h | 263
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 798
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 531
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 110
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_int.c | 1144
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_int.h | 396
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_l2.c | 1704
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_main.c | 1149
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c | 860
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.h | 369
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 370
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_sp.h | 364
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 170
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c | 893
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qede/Makefile | 3
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qede/qede.h | 285
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 385
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qede/qede_main.c | 2584
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 25
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 37
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 6
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 5
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 8
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 6
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 1
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 38
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 41
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 3
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 5
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 82
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 7
-rw-r--r--  kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c | 9
46 files changed, 22214 insertions, 117 deletions
diff --git a/kernel/drivers/net/ethernet/qlogic/Kconfig b/kernel/drivers/net/ethernet/qlogic/Kconfig
index d49cba129..ddcfcab03 100644
--- a/kernel/drivers/net/ethernet/qlogic/Kconfig
+++ b/kernel/drivers/net/ethernet/qlogic/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_QLOGIC
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -93,4 +91,16 @@ config NETXEN_NIC
---help---
This enables the support for NetXen's Gigabit Ethernet card.
+config QED
+ tristate "QLogic QED 25/40/100Gb core driver"
+ depends on PCI
+ select ZLIB_INFLATE
+ ---help---
+ This enables the support for ...
+
+config QEDE
+ tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+ depends on QED
+ ---help---
+ This enables the support for ...
endif # NET_VENDOR_QLOGIC
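
The QED/QEDE split above follows the usual core-module/protocol-driver layering: qed owns the shared device logic and qede binds to it, which is what "depends on QED" expresses at module level. As a minimal sketch of that layering in kernel-module terms — the module and symbol names here (example_core_ready and friends) are invented for illustration, not taken from the driver:

/* core.c - hypothetical core module, playing the role of qed */
#include <linux/module.h>

int example_core_ready(void)		/* invented API, illustration only */
{
	return 0;
}
EXPORT_SYMBOL_GPL(example_core_ready);	/* consumers gain a symbol dependency */

static int __init example_core_init(void)
{
	pr_info("example core loaded\n");
	return 0;
}
module_init(example_core_init);
MODULE_LICENSE("GPL");

/* eth.c - hypothetical NIC module, playing the role of qede */
#include <linux/module.h>

extern int example_core_ready(void);

static int __init example_eth_init(void)
{
	return example_core_ready();	/* symbol dependency loads core first */
}
module_init(example_eth_init);
MODULE_LICENSE("GPL");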
diff --git a/kernel/drivers/net/ethernet/qlogic/Makefile b/kernel/drivers/net/ethernet/qlogic/Makefile
index b2a283d9a..cee90e05b 100644
--- a/kernel/drivers/net/ethernet/qlogic/Makefile
+++ b/kernel/drivers/net/ethernet/qlogic/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
diff --git a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 87e073c6e..f90344677 100644
--- a/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -93,8 +93,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
- drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
static int
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/Makefile b/kernel/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644
index 000000000..5c2fd5723
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed.h b/kernel/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644
index 000000000..1292c3603
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed.h
@@ -0,0 +1,499 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.4.0.0"
+
+#define MAX_HWFNS_PER_DEVICE (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+/* cau states */
+enum qed_coalescing_mode {
+ QED_COAL_MODE_DISABLE,
+ QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
+
+#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+ u32 init_val;
+ bool b_valid;
+};
+
+/* The PCI personality is not quite synonymous with protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE protocol
+ */
+enum qed_pci_personality {
+ QED_PCI_ETH,
+ QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+enum QED_RESOURCES {
+ QED_SB,
+ QED_L2_QUEUE,
+ QED_VPORT,
+ QED_RSS_ENG,
+ QED_PQ,
+ QED_RL,
+ QED_MAC,
+ QED_VLAN,
+ QED_ILT,
+ QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+ QED_PF_L2_QUE,
+ QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+ QED_PORT_MODE_DE_2X40G,
+ QED_PORT_MODE_DE_2X50G,
+ QED_PORT_MODE_DE_1X100G,
+ QED_PORT_MODE_DE_4X10G_F,
+ QED_PORT_MODE_DE_4X10G_E,
+ QED_PORT_MODE_DE_4X20G,
+ QED_PORT_MODE_DE_1X40G,
+ QED_PORT_MODE_DE_2X25G,
+ QED_PORT_MODE_DE_1X25G
+};
+
+struct qed_hw_info {
+ /* PCI personality */
+ enum qed_pci_personality personality;
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[QED_MAX_RESC];
+ u32 resc_num[QED_MAX_RESC];
+ u32 feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ u8 num_tc;
+ u8 offload_tc;
+ u8 non_offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ u32 vendor_id;
+ u32 device_id;
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+
+ struct qed_igu_info *p_igu_info;
+
+ u32 port_mode;
+ u32 hw_mode;
+};
+
+struct qed_hw_cid_data {
+ u32 cid;
+ bool b_cid_allocated;
+
+ /* Additional identifiers */
+ u16 opaque_fid;
+ u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct qed_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ struct mutex mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u8 pure_lb_pq;
+ u8 offload_pq;
+ u8 pure_ack_pq;
+ u8 vf_queues_offset;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct qed_storm_stats {
+ struct storm_stats mstats;
+ struct storm_stats pstats;
+ struct storm_stats tstats;
+ struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+ struct fw_ver_info *fw_ver_info;
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+ void *token;
+ void (*func)(void *);
+};
+
+struct qed_hwfn {
+ struct qed_dev *cdev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine*/
+ u8 abs_pf_id;
+#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ /* BAR access */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct qed_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct qed_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct qed_rt_data *rt_data;
+
+ /* SPQ */
+ struct qed_spq *p_spq;
+
+ /* EQ */
+ struct qed_eq *p_eq;
+
+ /* Consolidate Q*/
+ struct qed_consq *p_consq;
+
+ /* Slow-Path definitions */
+ struct tasklet_struct *sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct qed_ptt *p_main_ptt;
+ struct qed_ptt *p_dpc_ptt;
+
+ struct qed_sb_sp_info *p_sp_sb;
+ struct qed_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ struct qed_pf_params pf_params;
+
+ /* Array of sb_info of all status blocks */
+ struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
+
+ struct qed_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not*/
+ bool b_int_enabled;
+ bool b_int_requested;
+
+ struct qed_mcp_info *mcp_info;
+
+ struct qed_hw_cid_data *p_tx_cids;
+ struct qed_hw_cid_data *p_rx_cids;
+
+ struct qed_dmae_info dmae_info;
+
+ /* QM init */
+ struct qed_qm_info qm_info;
+ struct qed_storm_stats storm_stats;
+
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+
+ struct qed_simd_fp_handler simd_proto_handler[64];
+
+ struct z_stream_s *stream;
+};
+
+struct pci_params {
+ int pm_cap;
+
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned int irq;
+ u8 pf_num;
+};
+
+struct qed_int_param {
+ u32 int_mode;
+ u8 num_vectors;
+ u8 min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+ struct qed_int_param in;
+ struct qed_int_param out;
+ struct msix_entry *msix_table;
+ bool fp_initialized;
+ u8 fp_msix_base;
+ u8 fp_msix_cnt;
+};
+
+struct qed_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ u8 type;
+#define QED_DEV_TYPE_BB_A0 (0 << 0)
+#define QED_DEV_TYPE_MASK (0x3)
+#define QED_DEV_TYPE_SHIFT (0)
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 12
+
+ u16 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xf
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum mf_mode mf_mode;
+#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+ u8 ver_str[VER_SIZE];
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum qed_coalescing_mode int_coalescing_mode;
+ u8 rx_coalesce_usecs;
+ u8 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ u32 drv_type;
+
+ struct qed_eth_stats *reset_stats;
+ struct qed_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Linux specific here */
+ struct qede_dev *edev;
+ struct pci_dev *pdev;
+ int msg_enable;
+
+ struct pci_params pci_params;
+
+ struct qed_int_params int_params;
+
+ u8 protocol;
+#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+ /* Callbacks to protocol driver */
+ union {
+ struct qed_common_cb_ops *common;
+ struct qed_eth_cb_ops *eth;
+ } protocol_ops;
+ void *ops_cookie;
+
+ const struct firmware *firmware;
+};
+
+#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
+ QED_DEV_TYPE_SHIFT)
+#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
+#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
+
+#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return inline u8
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ u32 concrete_fid)
+{
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+
+ return pfid;
+}
+
+#define PURE_LB_TC 8
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
+ (cdev->regview) + \
+ (offset))
+
+#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val) \
+ writel((u32)val, (void __iomem *)((u8 __iomem *)\
+ (cdev->doorbells) + (db_addr)))
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+ u32 input_len, u8 *input_buf,
+ u32 max_size, u8 *unzip_buf);
+
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
+#define QED_ETH_INTERFACE_VERSION 300
+
+#endif /* _QED_H */
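
qed.h composes doorbell addresses and decodes concrete FIDs with the MASK/SHIFT field convention (FIELD_VALUE/GET_FIELD, defined alongside the register layout in qed_hsi.h). A standalone sketch of the same pattern — the field names and widths below are invented for illustration, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packed layout in the qed <NAME>_MASK / <NAME>_SHIFT style */
#define EX_ADDR_DEMS_MASK	0x7
#define EX_ADDR_DEMS_SHIFT	0
#define EX_ADDR_ICID_MASK	0x1fffffff
#define EX_ADDR_ICID_SHIFT	3

#define EX_FIELD_VALUE(name, val) (((val) & name##_MASK) << name##_SHIFT)
#define EX_GET_FIELD(reg, name)   (((reg) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	/* Compose, as qed_db_addr() does for the DEMS and ICID fields */
	uint32_t db_addr = EX_FIELD_VALUE(EX_ADDR_DEMS, 2) |
			   EX_FIELD_VALUE(EX_ADDR_ICID, 0x1234);

	/* Decompose, as qed_concrete_to_sw_fid() does for the PFID field */
	printf("addr=0x%08x icid=0x%x dems=%u\n", db_addr,
	       EX_GET_FIELD(db_addr, EX_ADDR_ICID),
	       EX_GET_FIELD(db_addr, EX_ADDR_DEMS));
	return 0;
}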
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644
index 000000000..7ccdb46c6
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -0,0 +1,847 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE 3
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+};
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per protocol configuration object */
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cid_start;
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_QM,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct qed_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ size_t size;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_cxt_mngr {
+ /* Per protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ /* ILT shadow table */
+ struct qed_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+};
+
+static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+{
+ u32 type, pf_cids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ pf_cids += p_mngr->conn_cfg[type].cid_count;
+
+ return pf_cids;
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_qm_iids *iids)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 start_line, u32 total_size,
+ u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called only once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size,
+ p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val,
+ p_cli->last.val, p_blk->total_size,
+ p_blk->real_size_in_page, p_blk->start_line);
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 curr_line, total, pf_cids;
+ struct qed_qm_iids qm_iids;
+
+ memset(&qm_iids, 0, sizeof(qm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ curr_line = p_mngr->pf_start_line;
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC and QM clients */
+ pf_cids = qed_cxt_cdu_iids(p_mngr);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ qed_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
+ p_hwfn->qm_info.num_pqs, 0);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, QED_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++)
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+ }
+
+ return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_dma->size, p_dma->p_virt,
+ p_dma->p_phys);
+ p_dma->p_virt = NULL;
+ }
+ kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client,
+ u32 start_line_offset)
+{
+ struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left;
+
+ if (!p_blk->total_size)
+ return 0;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = min_t(u32, sz_left,
+ p_blk->real_size_in_page);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_phys,
+ GFP_KERNEL);
+ if (!p_virt)
+ return -ENOMEM;
+ memset(p_virt, 0, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *clients = p_mngr->clients;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 size, i, j;
+ int rc;
+
+ size = qed_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+ rc = -ENOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct qed_dma_mem)));
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
+
+ return 0;
+
+ilt_shadow_fail:
+ qed_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ kfree(p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+ }
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 size;
+
+ if (cid_cnt == 0)
+ continue;
+
+ size = DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+ if (!p_mngr->acquired[type].cid_map)
+ goto cid_map_fail;
+
+ p_mngr->acquired[type].max_count = cid_cnt;
+ p_mngr->acquired[type].start_cid = start_cid;
+
+ p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_mngr->acquired[type].start_cid,
+ p_mngr->acquired[type].max_count);
+ start_cid += cid_cnt;
+ }
+
+ return 0;
+
+cid_map_fail:
+ qed_cid_map_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ /* default ILT page size for all clients is 32K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate the ILT shadow table */
+ rc = qed_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = qed_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return 0;
+
+tables_alloc_fail:
+ qed_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ qed_cid_map_free(p_hwfn);
+ qed_ilt_shadow_free(p_hwfn);
+ kfree(p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+ if (cid_cnt == 0)
+ continue;
+
+ memset(p_mngr->acquired[type].cid_map, 0,
+ DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long));
+ }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_iids iids;
+
+ memset(&iids, 0, sizeof(iids));
+ qed_cxt_qm_iids(p_hwfn, &iids);
+
+ memset(&params, 0, sizeof(params));
+ params.port_id = p_hwfn->port_id;
+ params.pf_id = p_hwfn->rel_pf_id;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.is_first_pf = p_hwfn->first_on_engine;
+ params.num_pf_cids = iids.cids;
+ params.start_pq = qm_info->start_pq;
+ params.num_pf_pqs = qm_info->num_pqs;
+ params.start_vport = qm_info->num_vports;
+ params.pf_wfq = qm_info->pf_wfq;
+ params.pf_rl = qm_info->pf_rl;
+ params.pq_params = qm_info->qm_pq_params;
+ params.vport_params = qm_info->qm_vport_params;
+
+ qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ union qed_qm_pq_params pq_params;
+ u16 pq;
+
+ /* XCM pure-LB queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+ return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0;
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ /* 5 - PF */
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg,
+ ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients;
+ struct qed_cxt_mngr *p_mngr;
+ struct qed_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ qed_ilt_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+
+ /* Client's 1st val and RT array are absolute, ILT shadows'
+ * lines are relative.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt could be NULL in case of dynamic
+ * allocation
+ */
+ if (p_shdw[line].p_virt) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+ rt_offst, line, i,
+ (u64)(p_shdw[line].p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+ qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+ qed_qm_init_pf(p_hwfn);
+ qed_cm_init_pf(p_hwfn);
+ qed_dq_init_pf(p_hwfn);
+ qed_ilt_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+ p_mngr->acquired[type].max_count);
+
+ if (rel_cid >= p_mngr->acquired[type].max_count) {
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+ *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+ return 0;
+}
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+ u32 cid,
+ enum protocol_type *p_type)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ enum protocol_type p;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (p = 0; p < MAX_CONN_TYPES; p++) {
+ p_map = &p_mngr->acquired[p];
+
+ if (!p_map->cid_map)
+ continue;
+ if (cid >= p_map->start_cid &&
+ cid < p_map->start_cid + p_map->max_count)
+ break;
+ }
+ *p_type = p;
+
+ if (p == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+ return false;
+ }
+
+ rel_cid = cid - p_map->start_cid;
+ if (!test_bit(rel_cid, p_map->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+ return false;
+ }
+ return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_mngr->acquired[type].start_cid;
+ __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+ if (!b_acquired)
+ return -EINVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return -EINVAL;
+
+ p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+ return 0;
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons);
+
+ return 0;
+}
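
qed_cxt_acquire_cid()/qed_cxt_release_cid() above are a plain bitmap allocator: find_first_zero_bit() picks the lowest free relative CID, __set_bit() claims it, and the absolute CID is start_cid plus that offset. A user-space sketch of the same discipline, with the kernel bit helpers re-implemented as trivial stand-ins:

#include <stdint.h>
#include <stdio.h>

#define MAX_CIDS	64
#define BITS_PER_LONG_	(8 * sizeof(unsigned long))

static unsigned long cid_map[(MAX_CIDS + BITS_PER_LONG_ - 1) / BITS_PER_LONG_];

/* Stand-in for the kernel's find_first_zero_bit() */
static unsigned int find_first_zero(const unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG_] & (1UL << (i % BITS_PER_LONG_))))
			return i;
	return nbits;			/* map full - mirrors the kernel API */
}

static int acquire_cid(uint32_t start_cid, uint32_t *p_cid)
{
	unsigned int rel_cid = find_first_zero(cid_map, MAX_CIDS);

	if (rel_cid >= MAX_CIDS)
		return -1;		/* mirrors the -EINVAL path above */
	cid_map[rel_cid / BITS_PER_LONG_] |= 1UL << (rel_cid % BITS_PER_LONG_);
	*p_cid = start_cid + rel_cid;	/* absolute CID = base + relative */
	return 0;
}

static void release_cid(uint32_t start_cid, uint32_t cid)
{
	uint32_t rel_cid = cid - start_cid;

	cid_map[rel_cid / BITS_PER_LONG_] &= ~(1UL << (rel_cid % BITS_PER_LONG_));
}

int main(void)
{
	uint32_t cid;

	if (!acquire_cid(0x100, &cid))
		printf("acquired cid 0x%x\n", cid);	/* prints 0x100 */
	release_cid(0x100, cid);
	return 0;
}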
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644
index 000000000..c8e1f5e5c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+/**
+ * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info);
+
+enum qed_cxt_elem_type {
+ QED_ELEM_CXT,
+ QED_ELEM_TASK
+};
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid);
+
+#endif
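
The central computation behind qed_cxt_cfg_ilt_compute() (declared above, implemented in qed_cxt.c) is line accounting: elements never straddle an ILT page, so each line contributes (page / elem) * elem usable bytes and a client needs DIV_ROUND_UP(total, usable) lines. A standalone sketch of the arithmetic — the element count and size below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Per qed_cxt.c: page bytes = 1 << (hw_p_size + 12); default hw_p_size 3 = 32K */
static uint32_t ilt_page_bytes(uint32_t hw_p_size)
{
	return 1U << (hw_p_size + 12);
}

int main(void)
{
	uint32_t page = ilt_page_bytes(3);	/* 32768 bytes */
	uint32_t elem = 320;			/* made-up context size */
	uint32_t total = 4096 * elem;		/* made-up: 4096 contexts */

	/* real_size_in_page in qed_ilt_cli_blk_fill(): whole elements only */
	uint32_t usable = (page / elem) * elem;		/* 102 * 320 = 32640 */
	uint32_t lines = DIV_ROUND_UP(total, usable);	/* 41 lines */

	printf("page=%u usable=%u lines=%u\n", page, usable, lines);
	return 0;
}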
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_dev.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644
index 000000000..817bbd547
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -0,0 +1,1814 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/* API common to all protocols */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module, u8 dp_level)
+{
+ u32 i;
+
+ cdev->dp_level = dp_level;
+ cdev->dp_module = dp_module;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->cdev = cdev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ mutex_init(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ cdev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 */
+ cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ kfree(qm_info->qm_pq_params);
+ qm_info->qm_pq_params = NULL;
+ kfree(qm_info->qm_vport_params);
+ qm_info->qm_vport_params = NULL;
+ kfree(qm_info->qm_port_params);
+ qm_info->qm_port_params = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+ int i;
+
+ kfree(cdev->fw_data);
+ cdev->fw_data = NULL;
+
+ kfree(cdev->reset_stats);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ kfree(p_hwfn->p_tx_cids);
+ p_hwfn->p_tx_cids = NULL;
+ kfree(p_hwfn->p_rx_cids);
+ p_hwfn->p_rx_cids = NULL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_free(p_hwfn);
+ qed_qm_info_free(p_hwfn);
+ qed_spq_free(p_hwfn);
+ qed_eq_free(p_hwfn, p_hwfn->p_eq);
+ qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_int_free(p_hwfn);
+ qed_dmae_info_free(p_hwfn);
+ }
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_port_params *p_qm_port;
+ u8 num_vports, i, vport_id, num_ports;
+ u16 num_pqs, multi_cos_tcs = 1;
+
+ memset(qm_info, 0, sizeof(*qm_info));
+
+ num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+ /* Sanity checking that setup requires legal number of resources */
+ if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn,
+ "Need too many Physical queues - 0x%04x when only %04x are available\n",
+ num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+ return -EINVAL;
+ }
+
+ /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
+ */
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ num_pqs, GFP_ATOMIC);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ num_vports, GFP_ATOMIC);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ MAX_NUM_PORTS, GFP_ATOMIC);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ /* First init per-TC PQs */
+ for (i = 0; i < multi_cos_tcs; i++) {
+ struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ }
+
+ /* Then init pure-LB PQ */
+ qm_info->pure_lb_pq = i;
+ qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[i].wrr_group = 1;
+ i++;
+
+ qm_info->offload_pq = 0;
+ qm_info->num_pqs = num_pqs;
+ qm_info->num_vports = num_vports;
+
+ /* Initialize qm port parameters */
+ num_ports = p_hwfn->cdev->num_ports_in_engines;
+ for (i = 0; i < num_ports; i++) {
+ p_qm_port = &qm_info->qm_port_params[i];
+ p_qm_port->active = 1;
+ p_qm_port->num_active_phys_tcs = 4;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+
+ qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ qm_info->pf_wfq = 0;
+ qm_info->pf_rl = 0;
+ qm_info->vport_rl_en = 1;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ kfree(qm_info->qm_pq_params);
+ kfree(qm_info->qm_vport_params);
+ kfree(qm_info->qm_port_params);
+
+ return -ENOMEM;
+}
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+ struct qed_consq *p_consq;
+ struct qed_eq *p_eq;
+ int i, rc = 0;
+
+ cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+ if (!cdev->fw_data)
+ return -ENOMEM;
+
+ /* Allocate Memory for the Queue->CID mapping */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ int tx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ int rx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+ if (!p_hwfn->p_tx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Tx Cids\n");
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+
+ p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+ if (!p_hwfn->p_rx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Rx Cids\n");
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* First allocate the context manager structure */
+ rc = qed_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = qed_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Prepare and process QM requirements */
+ rc = qed_init_qm_info(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Compute the ILT client partition */
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+ * The table sizes are determined by the computations above
+ */
+ rc = qed_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because it initializes SPQ context */
+ rc = qed_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ p_eq = qed_eq_alloc(p_hwfn, 256);
+ if (!p_eq) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+ p_hwfn->p_eq = p_eq;
+
+ p_consq = qed_consq_alloc(p_hwfn);
+ if (!p_consq) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+ p_hwfn->p_consq = p_consq;
+
+ /* DMA info initialization */
+ rc = qed_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dmae_info structure\n");
+ goto alloc_err;
+ }
+ }
+
+ cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+ if (!cdev->reset_stats) {
+ DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+
+ return 0;
+
+alloc_err:
+ qed_resc_free(cdev);
+ return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_setup(p_hwfn);
+ qed_spq_setup(p_hwfn);
+ qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+ qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ }
+}
+
+#define FINAL_CLEANUP_CMD_OFFSET (0)
+#define FINAL_CLEANUP_CMD (0x1)
+#define FINAL_CLEANUP_VALID_OFFSET (6)
+#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
+#define FINAL_CLEANUP_COMP (0x2)
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ int rc = -EBUSY;
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+
+ command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
+ command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
+ command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
+ command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+ /* Make sure notification is not set before initiating final cleanup */
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(
+ p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ id, command);
+
+ qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ msleep(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = 0;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
+
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ hw_mode = (1 << MODE_BB_A0);
+
+ switch (p_hwfn->cdev->num_ports_in_engines) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+ return;
+ }
+
+ switch (p_hwfn->cdev->mf_mode) {
+ case SF:
+ hw_mode |= 1 << MODE_SF;
+ break;
+ case MF_OVLAN:
+ hw_mode |= 1 << MODE_MF_SD;
+ break;
+ case MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
+ hw_mode |= 1 << MODE_SF;
+ }
+
+ hw_mode |= 1 << MODE_ASIC;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, sb_id;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ if (!p_block->is_pf)
+ continue;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id,
+ 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+ sb_entry);
+ }
+ }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_common_rt_init_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc = 0;
+
+ qed_init_cau_rt_data(cdev);
+
+ /* Program GTT windows */
+ qed_gtt_init(p_hwfn);
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.pf_rl_en = qm_info->pf_rl_en;
+ params.pf_wfq_en = qm_info->pf_wfq_en;
+ params.vport_rl_en = qm_info->vport_rl_en;
+ params.vport_wfq_en = qm_info->vport_wfq_en;
+ params.port_params = qm_info->qm_port_params;
+
+ qed_qm_common_rt_init(p_hwfn, &params);
+
+ qed_cxt_hw_init_common(p_hwfn);
+
+ /* Close gate from NIG to BRB/Storm; By default they are open, but
+ * we close them to prevent NIG from passing data to reset blocks.
+ * Should have been done in the ENGINE phase, but init-tool lacks
+ * proper port-pretend capabilities.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_unpretend(p_hwfn, p_ptt);
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ /* Disable relaxed ordering in the PCI config space */
+ qed_wr(p_hwfn, p_ptt, 0x20b4,
+ qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+
+ return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ int rc = 0;
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ int rc = 0;
+
+ if (p_hwfn->mcp_info) {
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update the rate limit once we actually have a link */
+ p_hwfn->qm_info.pf_rl = 100;
+ }
+
+ qed_cxt_hw_init_pf(p_hwfn);
+
+ qed_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn,
+ NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+ }
+
+ /* Protocol Configuration */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* Clean up the chip from previous driver remains, if any exist */
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ if (rc != 0)
+ return rc;
+
+ /* PF Init sequence */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ if (b_hw_start) {
+ /* enable interrupts */
+ qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+ /* send function start command */
+ rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+ }
+ return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 enable)
+{
+ u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+ /* Change PF in PXP */
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* wait until the value is set - poll every 50us, up to 20,000 times (~1 second) */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ usleep_range(50, 60);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data)
+{
+ struct qed_storm_stats *p_stat;
+ u32 load_code, param, *p_address;
+ int rc, mfw_rc, i;
+ u8 fw_vport = 0;
+
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
+ if (rc != 0)
+ return rc;
+
+ /* Enable DMAE in PXP */
+ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ qed_calc_hw_mode(p_hwfn);
+
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_code);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ return rc;
+ }
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+ rc, load_code);
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+ /* Fall through */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+
+ /* Fall through */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode,
+ b_hw_start, int_mode,
+ allow_npar_tx_switch);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+
+ /* ACK mfw regardless of success or failure of initialization */
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_LOAD_DONE,
+ 0, &load_code, &param);
+ if (rc)
+ return rc;
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+ return mfw_rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+
+ /* init PF stats */
+ p_stat = &p_hwfn->storm_stats;
+ p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+
+ p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+
+ p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+
+ p_address = &p_stat->tstats.address;
+ *p_address = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ }
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+int qed_hw_stop(struct qed_dev *cdev)
+{
+ int rc = 0, t_rc;
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ rc = qed_sp_pf_stop(p_hwfn);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ /* Disable Attention Generation */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt,
+ false);
+ if (t_rc != 0)
+ rc = t_rc;
+
+ return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 reg,
+ bool expected)
+{
+ u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+ if (assert_val != expected) {
+ DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ reg, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+ int rc = 0;
+ u32 unload_resp, unload_param;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+ /* Check for incorrect states */
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ TCFC_REG_STRONG_ENABLE_PF, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ CCFC_REG_STRONG_ENABLE_PF, 0);
+
+ /* Send unload command to MCP */
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_REQ,
+ DRV_MB_PARAM_UNLOAD_WOL_MCP,
+ &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+ unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_DONE,
+ 0, &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Free hwfn memory and resources acquired in qed_hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+ qed_ptt_pool_free(p_hwfn);
+ kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* clear indirect access */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+ /* Clean previous errors, if any exist */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+
+ /* enable internal target-read */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ return 0;
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ int num_features = 1;
+
+ feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+ num_features,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE));
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+ feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+ num_features);
+}
+
+static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ int num_funcs, i;
+
+ num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
+ : p_hwfn->cdev->num_ports_in_engines;
+
+ resc_num[QED_SB] = min_t(u32,
+ (MAX_SB_PER_PATH_BB / num_funcs),
+ qed_int_get_num_sbs(p_hwfn, NULL));
+ resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+ resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+ resc_num[QED_RL] = 8;
+ resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+ resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+ num_funcs;
+ resc_num[QED_ILT] = 950;
+
+ for (i = 0; i < QED_MAX_RESC; i++)
+ resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+ qed_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "The numbers for each resource are:\n"
+ "SB = %d start = %d\n"
+ "L2_QUEUE = %d start = %d\n"
+ "VPORT = %d start = %d\n"
+ "PQ = %d start = %d\n"
+ "RL = %d start = %d\n"
+ "MAC = %d start = %d\n"
+ "VLAN = %d start = %d\n"
+ "ILT = %d start = %d\n",
+ p_hwfn->hw_info.resc_num[QED_SB],
+ p_hwfn->hw_info.resc_start[QED_SB],
+ p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_num[QED_VPORT],
+ p_hwfn->hw_info.resc_start[QED_VPORT],
+ p_hwfn->hw_info.resc_num[QED_PQ],
+ p_hwfn->hw_info.resc_start[QED_PQ],
+ p_hwfn->hw_info.resc_num[QED_RL],
+ p_hwfn->hw_info.resc_start[QED_RL],
+ p_hwfn->hw_info.resc_num[QED_MAC],
+ p_hwfn->hw_info.resc_start[QED_MAC],
+ p_hwfn->hw_info.resc_num[QED_VLAN],
+ p_hwfn->hw_info.resc_start[QED_VLAN],
+ p_hwfn->hw_info.resc_num[QED_ILT],
+ p_hwfn->hw_info.resc_start[QED_ILT]);
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+ struct qed_mcp_link_params *link;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read nvm_cfg1 (notice this is just an offset, not the offsize (TBD)) */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ /* Read Vendor Id / Device Id */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, pci_id);
+ p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
+ NVM_CFG1_GLOB_VENDOR_ID_MASK;
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, core_cfg);
+
+ core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
+ offsetof(struct nvm_cfg1_func, device_id);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ if (IS_MF(p_hwfn)) {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
+ } else {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, speed_cap_mask));
+ link->speed.advertised_speeds =
+ link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+ link->speed.advertised_speeds;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+ link_temp);
+ }
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->cdev->mf_mode = MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->cdev->mf_mode = MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
+ p_hwfn->cdev->mf_mode = SF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+ p_hwfn->cdev->mf_mode);
+
+ return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ u32 port_mode;
+ int rc;
+
+ /* Read the port mode */
+ port_mode = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NW_PORT_MODE_BB_B0);
+
+ if (port_mode < 3) {
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ } else if (port_mode <= 5) {
+ p_hwfn->cdev->num_ports_in_engines = 2;
+ } else {
+ DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+
+ /* Default num_ports_in_engines to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+
+ qed_hw_get_nvm_info(p_hwfn, p_ptt);
+
+ rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (qed_mcp_is_init(p_hwfn))
+ ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac);
+ else
+ eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ enum qed_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+ qed_hw_get_resc(p_hwfn);
+
+ return rc;
+}
+
+static void qed_get_dev_info(struct qed_dev *cdev)
+{
+ u32 tmp;
+
+ cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_REV);
+ MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+ /* Learn number of HW-functions */
+ tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+ DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+ cdev->num_hwfns = 2;
+ } else {
+ cdev->num_hwfns = 1;
+ }
+
+ cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+ cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+ DP_INFO(cdev->hwfns,
+ "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ cdev->chip_num, cdev->chip_rev,
+ cdev->chip_bond_id, cdev->chip_metal);
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ void __iomem *p_regview,
+ void __iomem *p_doorbells,
+ enum qed_pci_personality personality)
+{
+ int rc = 0;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ return -EINVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ rc = qed_hw_hwfn_prepare(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ goto err0;
+ }
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id)
+ qed_get_dev_info(p_hwfn->cdev);
+
+ /* Initialize MCP structure */
+ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = qed_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ goto err2;
+ }
+
+ return rc;
+err2:
+ qed_mcp_free(p_hwfn);
+err1:
+ qed_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ u8 bar_id)
+{
+ u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
+ : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+ /* Get the BAR size (in KB) from the value read from hardware */
+ return 1 << (val + 15);
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
+
+ /* Store the precompiled init data ptrs */
+ qed_init_iro_array(cdev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(p_hwfn,
+ cdev->regview,
+ cdev->doorbells, personality);
+ if (rc)
+ return rc;
+
+ personality = p_hwfn->hw_info.personality;
+
+ /* Initialize the rest of the hwfns */
+ if (cdev->num_hwfns > 1) {
+ void __iomem *p_regview, *p_doorbell;
+ u8 __iomem *addr;
+
+ /* adjust bar offset for second engine */
+ addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+ p_regview = addr;
+
+ /* adjust doorbell bar offset for second engine */
+ addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+ p_doorbell = addr;
+
+ /* prepare second hw function */
+ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
+ p_doorbell, personality);
+
+ /* In case of error, free the previously
+ * initialized hwfn 0.
+ */
+ if (rc) {
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ }
+ }
+
+ return rc;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_init_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ }
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain)
+{
+ dma_addr_t p_pbl_phys = 0;
+ void *p_pbl_virt = NULL;
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
+ u16 page_cnt = 0;
+ size_t size;
+
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ size = page_cnt * QED_CHAIN_PAGE_SIZE;
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain mem\n");
+ goto nomem;
+ }
+
+ if (mode == QED_CHAIN_MODE_PBL) {
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys,
+ GFP_KERNEL);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
+ goto nomem;
+ }
+
+ qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use,
+ p_pbl_phys, p_pbl_virt);
+ } else {
+ qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use, mode);
+ }
+
+ return 0;
+
+nomem:
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PAGE_SIZE,
+ p_virt, p_phys);
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+ p_pbl_virt, p_pbl_phys);
+
+ return -ENOMEM;
+}
+
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ size_t size;
+
+ if (!p_chain->p_virt_addr)
+ return;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->pbl.p_virt_table,
+ p_chain->pbl.p_phys_table);
+ }
+
+ size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+}
+
+static void __qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ int i, j;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct tstorm_per_port_stat tstats;
+ struct port_stats port_stats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ p_hwfn->storm_stats.mstats.address,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ p_hwfn->storm_stats.ustats.address,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ p_hwfn->storm_stats.pstats.address,
+ p_hwfn->storm_stats.pstats.len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ p_hwfn->storm_stats.tstats.address,
+ p_hwfn->storm_stats.tstats.len);
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ if (p_hwfn->mcp_info)
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+ stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+ stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+ stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+ stats->rx_64_byte_packets += port_stats.pmm.r64;
+ stats->rx_127_byte_packets += port_stats.pmm.r127;
+ stats->rx_255_byte_packets += port_stats.pmm.r255;
+ stats->rx_511_byte_packets += port_stats.pmm.r511;
+ stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ stats->rx_crc_errors += port_stats.pmm.rfcs;
+ stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ stats->rx_pause_frames += port_stats.pmm.rxpf;
+ stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ stats->rx_align_errors += port_stats.pmm.raln;
+ stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ stats->rx_oversize_packets += port_stats.pmm.rovr;
+ stats->rx_jabbers += port_stats.pmm.rjbr;
+ stats->rx_undersize_packets += port_stats.pmm.rund;
+ stats->rx_fragments += port_stats.pmm.rfrg;
+ stats->tx_64_byte_packets += port_stats.pmm.t64;
+ stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ stats->tx_pause_frames += port_stats.pmm.txpf;
+ stats->tx_pfc_frames += port_stats.pmm.txpp;
+ stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ stats->tx_total_collisions += port_stats.pmm.tncl;
+ stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+ for (j = 0; j < 8; j++) {
+ stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ __qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Subtract the baseline captured at the last statistics reset;
+ * this assumes struct qed_eth_stats consists solely of u64 counters.
+ */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.mstats.address,
+ &mstats,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.ustats.address,
+ &ustats,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.pstats.address,
+ &pstats,
+ p_hwfn->storm_stats.pstats.len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ __qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ DP_NOTICE(p_hwfn,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+ return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_VPORT);
+ max = min + RESC_NUM(p_hwfn, QED_VPORT);
+ DP_NOTICE(p_hwfn,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+ return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
+ DP_NOTICE(p_hwfn,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+ return 0;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644
index 000000000..e29a3ba6c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -0,0 +1,283 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ * Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data);
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ * only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
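+
+/* Usage sketch (illustrative only, error handling elided): the expected
+ * ordering of the lifecycle API above is prepare -> init -> stop -> remove.
+ * The personality and interrupt-mode values are assumptions for the sake
+ * of the example.
+ *
+ *	rc = qed_hw_prepare(cdev, QED_PCI_ETH);
+ *	rc = qed_hw_init(cdev, true, QED_INT_MODE_MSIX, true, bin_fw_data);
+ *	...
+ *	rc = qed_hw_stop(cdev);
+ *	qed_hw_remove(cdev);
+ */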
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
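+
+/* Usage sketch (illustrative only; 'addr' and 'val' are placeholders):
+ * the acquire/use/release pattern described above, as used throughout
+ * qed_dev.c.
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (!p_ptt)
+ *		return -EBUSY;
+ *	val = qed_rd(p_hwfn, p_ptt, addr);
+ *	qed_ptt_release(p_hwfn, p_ptt);
+ */
+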
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_GRC
+};
+
+/* Values of flags: if the QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE while the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to a destination address
+ * using DMA.
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct qed_dmae_params {
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
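+
+/* Usage sketch (illustrative only; zero_buf_phys, grc_addr and
+ * size_in_dwords are placeholders): zero-fill a GRC region by replicating
+ * a small zeroed source block, per the QED_DMAE_FLAG_RW_REPL_SRC
+ * description above.
+ *
+ *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)zero_buf_phys,
+ *			       grc_addr, size_in_dwords,
+ *			       QED_DMAE_FLAG_RW_REPL_SRC);
+ */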
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param cdev
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param cdev
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain);
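+
+/* Usage sketch (illustrative only; the element type, count and use-mode
+ * value are assumptions): allocate a PBL-mode chain, use it, then free
+ * its DMA memory.
+ *
+ *	struct qed_chain chain;
+ *
+ *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ *			     QED_CHAIN_MODE_PBL, 64,
+ *			     sizeof(union event_ring_element), &chain);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	qed_chain_free(cdev, &chain);
+ */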
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
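+
+/* Usage sketch (illustrative only): the three helpers above translate a
+ * hwfn-relative ID into its engine-absolute value, i.e. dst_id =
+ * RESC_START(resource) + src_id. For example, with a vport RESC_START of
+ * 8, src_id 2 yields dst_id 10.
+ *
+ *	u8 abs_vport;
+ *
+ *	rc = qed_fw_vport(p_hwfn, 2, &abs_vport);
+ */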
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id);
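+
+/* Usage sketch (illustrative only): as invoked from qed_hw_init_pf() in
+ * qed_dev.c, before the PF init phase runs.
+ *
+ *	rc = qed_final_cleanup(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ *	if (rc != 0)
+ *		return rc;
+ */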
+
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644
index 000000000..b2f8e854d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+/********************************/
+/* Add include to common target */
+/********************************/
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_RESERVED,
+ COMMON_EVENT_RESERVED2,
+ COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_RESERVED4,
+ COMMON_EVENT_RESERVED5,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_RESERVED,
+ COMMON_RAMROD_RESERVED2,
+ COMMON_RAMROD_RESERVED3,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 word5 /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+struct eth_mstorm_per_queue_stat {
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+ struct event_ring_next_addr next_addr;
+};
+
+enum personality_type {
+ PERSONALITY_RESERVED,
+ PERSONALITY_RESERVED2,
+	PERSONALITY_RDMA_AND_ETH /* RoCE or iWARP */,
+ PERSONALITY_RESERVED3,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_RESERVED4,
+ MAX_PERSONALITY_TYPE
+};
+
+struct pf_start_tunnel_config {
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ u32 outer_tag;
+ u8 reserved0[4];
+};
+
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr;
+};
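
Posting an SPQE amounts to filling the header and pointing data_ptr at the DMA-mapped ramrod data. A hedged sketch, assuming the driver's regpair carries lo/hi dwords and using the kernel's endianness and DMA-address helpers; all values are caller-supplied:

static void fill_spqe(struct slow_path_element *elem, u32 cid, u8 cmd_id,
		      u8 protocol_id, u16 echo, dma_addr_t ramrod_phys)
{
	/* Header fields are little-endian on the wire. */
	elem->hdr.cid = cpu_to_le32(cid);
	elem->hdr.cmd_id = cmd_id;
	elem->hdr.protocol_id = protocol_id;
	elem->hdr.echo = cpu_to_le16(echo);

	/* Point at the DMA-mapped ramrod data buffer. */
	elem->data_ptr.lo = cpu_to_le32(lower_32_bits(ramrod_phys));
	elem->data_ptr.hi = cpu_to_le32(upper_32_bits(ramrod_phys));
}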
+
+struct tstorm_per_port_stat {
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair fcoe_irregular_pkt;
+ struct regpair roce_irregular_pkt;
+ struct regpair eth_irregular_pkt;
+ struct regpair toe_irregular_pkt;
+ struct regpair preroce_irregular_pkt;
+};
+
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x618000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_PTU,
+ BLOCK_DMAE,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+ __le32 comp_addr_hi;
+	__le32 comp_val /* Value to write to completion address */;
+	__le32 crc32 /* crc32 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
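
The opcode dword is assembled by OR-ing each field, masked and shifted into place. A hedged sketch (the numeric source/destination/completion values are placeholders; their encodings are defined elsewhere in the HSI):

static void dmae_set_opcode(struct dmae_cmd *cmd, u8 port_id, u8 pf_id)
{
	u32 opcode = 0;

	/* Placeholder field values; only the packing idiom is the point. */
	opcode |= (1 & DMAE_CMD_SRC_MASK) << DMAE_CMD_SRC_SHIFT;
	opcode |= (1 & DMAE_CMD_DST_MASK) << DMAE_CMD_DST_SHIFT;
	opcode |= (1 & DMAE_CMD_COMP_FUNC_MASK) << DMAE_CMD_COMP_FUNC_SHIFT;
	opcode |= ((u32)port_id & DMAE_CMD_PORT_ID_MASK) <<
		  DMAE_CMD_PORT_ID_SHIFT;
	opcode |= ((u32)pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		  DMAE_CMD_SRC_PF_ID_SHIFT;
	opcode |= ((u32)pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		  DMAE_CMD_DST_PF_ID_SHIFT;

	cmd->opcode = cpu_to_le32(opcode);
}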
+
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
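
Unpacking a mapping line reverses the same mask/shift convention; a minimal sketch that returns the MSI-X vector number of a valid line:

static u8 igu_line_vector(const struct igu_mapping_line *p_line)
{
	u32 line = le32_to_cpu(p_line->igu_mapping_line_fields);

	if (!((line >> IGU_MAPPING_LINE_VALID_SHIFT) &
	      IGU_MAPPING_LINE_VALID_MASK))
		return 0; /* line not valid */

	return (line >> IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT) &
	       IGU_MAPPING_LINE_VECTOR_NUMBER_MASK;
}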
+
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+enum init_modes {
+ MODE_BB_A0,
+ MODE_RESERVED,
+ MODE_RESERVED2,
+ MODE_ASIC,
+ MODE_RESERVED3,
+ MODE_RESERVED4,
+ MODE_RESERVED5,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_40G,
+ MODE_100G,
+ MODE_EAGLE_ENG1_WORKAROUND,
+ MAX_INIT_MODES
+};
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_RESERVED,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ u32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
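
The map entry is programmed as a single dword assembled from these fields; a hedged sketch with caller-supplied IDs:

static u32 qm_pq_map_value(u16 vp_pq_id, u8 rl_id, u8 voq, u8 rl_valid)
{
	u32 reg = 0;

	reg |= (1 & QM_RF_PQ_MAP_PQ_VALID_MASK) << QM_RF_PQ_MAP_PQ_VALID_SHIFT;
	reg |= ((u32)rl_id & QM_RF_PQ_MAP_RL_ID_MASK) << QM_RF_PQ_MAP_RL_ID_SHIFT;
	reg |= ((u32)vp_pq_id & QM_RF_PQ_MAP_VP_PQ_ID_MASK) <<
	       QM_RF_PQ_MAP_VP_PQ_ID_SHIFT;
	reg |= ((u32)voq & QM_RF_PQ_MAP_VOQ_MASK) << QM_RF_PQ_MAP_VOQ_SHIFT;
	reg |= ((u32)rl_valid & QM_RF_PQ_MAP_RL_VALID_MASK) <<
	       QM_RF_PQ_MAP_RL_VALID_SHIFT;

	return reg;
}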
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* init pattern size in bytes (field width given in bits) */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN 19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS 8
+#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS 18
+#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS 6
+#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS 8
+#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM 3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS 3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS 0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT 0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
+
+/* max number of register values following the idle check header */
+#define IDLE_CHK_MAX_DUMP_REGS 2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR 0
+#define IDLE_CHK_QM_RD_WR_BANK 1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* the MCP Trace metadata signature is duplicated in the perl script that
+ * generates the NVRAM images.
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+ u32 offset;
+ u32 length /* buffer length in bytes */;
+};
+
+/* binary buffer types */
+enum bin_buffer_type {
+ BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_IRO /* internal RAM offsets array */,
+ MAX_BIN_BUFFER_TYPE
+};
+
+/* Chip IDs */
+enum chip_ids {
+ CHIP_BB_A0 /* BB A0 chip ID */,
+ CHIP_BB_B0 /* BB B0 chip ID */,
+ CHIP_K2 /* AH chip ID */,
+ MAX_CHIP_IDS
+};
+
+enum idle_chk_severity_types {
+ IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+struct init_array_raw_hdr {
+ __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+struct init_array_standard_hdr {
+ __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+struct init_array_zipped_hdr {
+ __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+struct init_array_pattern_hdr {
+ __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Blocks ID */;
+};
+
+/* init comparison types */
+enum init_comparison_types {
+	INIT_COMPARISON_EQ /* read value must equal the expected value */,
+	INIT_COMPARISON_OR /* (read value | expected value) must be non-zero */,
+	INIT_COMPARISON_AND /* (read value & expected value) must equal the expected value */,
+ MAX_INIT_COMPARISON_TYPES
+};
+
+/* init operation: delay */
+struct init_delay_op {
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+ __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
+};
+
+/* init operation: if_phase */
+struct init_if_phase_op {
+ __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/* init array params */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+ __le32 data;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/* init operation: read */
+struct init_read_op {
+ __le32 op_data;
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_COMP_MASK 0x7
+#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 7
+#define INIT_READ_OP_POLL_MASK 0x1
+#define INIT_READ_OP_POLL_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+ __le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
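
All op variants share the same 4-bit opcode in bits 0-3 of their first dword, so the raw view can dispatch before the specific view is read; a minimal sketch:

static enum init_op_types init_op_type(const union init_op *op)
{
	u32 data = le32_to_cpu(op->raw.op_data);

	/* The opcode field is common to every variant of the union. */
	return (data >> INIT_RAW_OP_OP_SHIFT) & INIT_RAW_OP_OP_MASK;
}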
+
+/* Init command operation types */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_IF_MODE,
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
+
+/* init source types */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
+
+/* Internal RAM Offsets macro data */
+struct iro {
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
+};
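
Each IRO entry encodes a RAM field as a base offset plus up to three scaled indices; the *_OFFSET macros further down instantiate exactly this pattern for a single index. A minimal sketch of the general form:

/* Generic form of the IRO offset macros below: base plus scaled indices. */
static u32 iro_offset(const struct iro *e, u32 i1, u32 i2, u32 i3)
{
	return e->base + i1 * e->m1 + i2 * e->m2 + i3 * e->m3;
}

/* e.g. TSTORM_PORT_STAT_OFFSET(port_id) == iro_offset(&IRO[1], port_id, 0, 0) */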
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+ u8 num_active_phys_tcs;
+ u16 num_pbf_cmd_lines;
+ u16 num_btb_blocks;
+ __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+ u8 reserved;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ u32 vport_rl;
+ u16 vport_wfq;
+ u16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+ 0x00f000UL
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+ 0x010000UL
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+ 0x011000UL
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+ 0x012000UL
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+ 0x013000UL
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+ 0x014000UL
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+ 0x015000UL
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+ 0x016000UL
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+ 0x017000UL
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+ 0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
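
A hedged usage sketch; the resource counts are placeholders for values the caller derives from its own CID/task accounting, not recommended numbers:

static u32 qm_ilt_4k_pages(void)
{
	/* All counts below are placeholders. */
	return qed_qm_pf_mem_size(0,	/* pf_id */
				  8192,	/* num_pf_cids */
				  0,	/* num_vf_cids */
				  0,	/* num_tids */
				  64,	/* num_pf_pqs */
				  0);	/* num_vf_pqs */
}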
+
+struct qed_qm_common_rt_init_params {
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
+};
+
+/**
+ * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param port_params - array of size MAX_NUM_PORTS with
+ * parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl);
+
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
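
A hedged sketch of the documented stop-then-release sequence for a PF's Tx PQs; the error value and wrapper are illustrative:

static int qm_stop_tx_pqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u16 start_pq, u16 num_pqs)
{
	/* First stop the PQs, then release them. */
	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false /* stop */,
				  true /* Tx PQs */, start_pq, num_pqs))
		return -EBUSY; /* timed out waiting for QM command done */

	return qed_send_qm_stop_cmd(p_hwfn, p_ptt, true /* release */,
				    true, start_pq, num_pqs) ? 0 : -EBUSY;
}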
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \
+ ((port_id) * \
+ IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \
+ ((vf_id) * \
+ IRO[2].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \
+ ((pf_id) * \
+ IRO[4].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[4].size)
+/* Ustorm Completion ring consumer */
+#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \
+ ((global_queue_id) * \
+ IRO[5].m1))
+#define USTORM_CQ_CONS_SIZE (IRO[5].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \
+ ((core_rx_queue_id) * \
+ IRO[12].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size)
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
+ ((core_rx_q_id) * \
+ IRO[13].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
+ ((core_rx_q_id) * \
+ IRO[14].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
+ ((core_txst_id) * \
+ IRO[15].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
+ ((stat_counter_id) * \
+ IRO[16].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \
+ ((queue_id) * \
+ IRO[17].m1))
+#define MSTORM_PRODS_SIZE (IRO[17].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \
+ ((stat_counter_id) * \
+ IRO[19].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[19].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \
+ ((queue_id) * \
+ IRO[20].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \
+ ((stat_counter_id) * \
+ IRO[21].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \
+ ((pf_id) * \
+ IRO[22].m1))
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \
+ ((queue_id) * \
+ IRO[23].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \
+ ((rss_id) * \
+ IRO[24].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \
+ ((rss_id) * \
+ IRO[25].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \
+ ((pf_id) * \
+ IRO[26].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \
+ ((cmdq_queue_id) * \
+ IRO[27].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \
+ ((rq_queue_id) * \
+ IRO[28].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size)
+/* Pstorm RoCE statistics */
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \
+ ((stat_counter_id) * \
+ IRO[29].m1))
+#define PSTORM_ROCE_STAT_SIZE (IRO[29].size)
+/* Tstorm RoCE statistics */
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
+ ((stat_counter_id) * \
+ IRO[30].m1))
+#define TSTORM_ROCE_STAT_SIZE (IRO[30].size)
+
+static const struct iro iro_arr[31] = {
+ { 0x10, 0x0, 0x0, 0x0, 0x8 },
+ { 0x4448, 0x60, 0x0, 0x0, 0x60 },
+ { 0x498, 0x8, 0x0, 0x0, 0x4 },
+ { 0x494, 0x0, 0x0, 0x0, 0x4 },
+ { 0x10, 0x8, 0x0, 0x0, 0x2 },
+ { 0x90, 0x8, 0x0, 0x0, 0x2 },
+ { 0x4540, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x39e0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x2598, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x4350, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x52d0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x7a48, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x100, 0x8, 0x0, 0x0, 0x8 },
+ { 0x5808, 0x10, 0x0, 0x0, 0x10 },
+ { 0xb100, 0x30, 0x0, 0x0, 0x30 },
+ { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
+ { 0x54f8, 0x40, 0x0, 0x0, 0x40 },
+ { 0x200, 0x10, 0x0, 0x0, 0x8 },
+ { 0x9e70, 0x0, 0x0, 0x0, 0x4 },
+ { 0x7ca0, 0x40, 0x0, 0x0, 0x30 },
+ { 0xd00, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2790, 0x80, 0x0, 0x0, 0x38 },
+ { 0xa520, 0xf0, 0x0, 0x0, 0xf0 },
+ { 0x80, 0x8, 0x0, 0x0, 0x8 },
+ { 0xac0, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2580, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2500, 0x8, 0x0, 0x0, 0x8 },
+ { 0x440, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1800, 0x8, 0x0, 0x0, 0x2 },
+ { 0x27c8, 0x80, 0x0, 0x0, 0x10 },
+ { 0x4710, 0x10, 0x0, 0x0, 0x10 },
+};
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2232
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6663
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6665
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6667
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30812
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
+
+#define RUNTIME_ARRAY_SIZE 34432
+
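+/* Illustrative sketch (helper and array names here are examples, not part
+ * of this patch): each *_RT_OFFSET above indexes one u32 slot in a
+ * RUNTIME_ARRAY_SIZE-entry staging array that init code fills before the
+ * values are downloaded to the chip.
+ */
+static inline void example_store_rt_reg(u32 *rt_init_val, bool *rt_valid,
+ u32 rt_offset, u32 val)
+{
+ rt_init_val[rt_offset] = val; /* e.g. QM_REG_RLPFPERIOD_RT_OFFSET */
+ rt_valid[rt_offset] = true; /* mark the slot as pending download */
+}
+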
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
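+
+/* Illustrative sketch (macro names here are examples, not part of this
+ * patch): the _MASK/_SHIFT pairs above describe sub-fields packed into
+ * the flagsN bytes, intended for generic accessors of this shape:
+ */
+#define EXAMPLE_GET_FIELD(value, name) \
+ (((value) >> (name##_SHIFT)) & (name##_MASK))
+#define EXAMPLE_SET_FIELD(value, name, val) \
+ ((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) | \
+ (((val) & (name##_MASK)) << (name##_SHIFT)))
+/* e.g. EXAMPLE_GET_FIELD(p_ctx->flags2, XSTORM_ETH_CONN_AG_CTX_CF0) */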
+
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2] /* padding */;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2] /* padding */;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+};
+
+enum eth_filter_action {
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+ ETH_FILTER_ACTION_REPLACE,
+ MAX_ETH_FILTER_ACTION
+};
+
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
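+
+/* Illustrative sketch (the byte order is an assumption mirroring the usual
+ * firmware convention; the helper name is an example): a 6-byte MAC is
+ * split across mac_msb/mac_mid/mac_lsb, two bytes per little-endian word,
+ * most significant bytes first.
+ */
+static inline void example_fill_filter_mac(struct eth_filter_cmd *p_cmd,
+ const u8 *mac)
+{
+ p_cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
+ p_cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
+ p_cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
+}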
+
+struct eth_filter_cmd_header {
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+enum eth_filter_type {
+ ETH_FILTER_TYPE_MAC,
+ ETH_FILTER_TYPE_VLAN,
+ ETH_FILTER_TYPE_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC,
+ ETH_FILTER_TYPE_INNER_VLAN,
+ ETH_FILTER_TYPE_INNER_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_VNI,
+ MAX_ETH_FILTER_TYPE
+};
+
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+ ETH_RAMROD_RESERVED,
+ ETH_RAMROD_RESERVED2,
+ ETH_RAMROD_RESERVED3,
+ ETH_RAMROD_RESERVED4,
+ ETH_RAMROD_RESERVED5,
+ ETH_RAMROD_RESERVED6,
+ ETH_RAMROD_RESERVED7,
+ ETH_RAMROD_RESERVED8,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+struct eth_vport_rss_config {
+ __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED,
+ ETH_VPORT_RSS_MODE_REGULAR,
+ MAX_ETH_VPORT_RSS_MODE
+};
+
+struct eth_vport_rx_mode {
+ __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+ __le16 reserved2[3];
+};
+
+struct eth_vport_tpa_param {
+ u64 reserved[2];
+};
+
+struct eth_vport_tx_mode {
+ __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+ __le16 reserved2[3];
+};
+
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[4];
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair sge_base;
+};
+
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
+};
+
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 init_sge_ring_flg;
+ u8 vport_id;
+ u8 pxp_tph_valid_sge;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[6];
+ struct regpair sge_base;
+};
+
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 tc;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ u8 pxp_st_hint;
+ u8 reserved1[3];
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ struct regpair pbl_base_addr;
+};
+
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+struct vport_filter_update_ramrod_data {
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ __le16 default_vlan;
+ u8 untagged;
+ u8 reserved[7];
+};
+
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_sge_param_flg;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+ u8 default_vlan_en;
+ u8 update_default_vlan_flg;
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 reserved;
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+struct vport_update_ramrod_data {
+ struct vport_update_ramrod_data_cmn common;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+#define VF_MAX_STATIC 192 /* In case of K2 */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of a single element (not of the whole array, if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* QED_SECTION_SIZE calculates the size in bytes out of offsize */
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section.
+ */
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC address of a section's offsize field.
+ * Use offsetof(), since OFFSETUP collides with the firmware definition
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
+ offsetof(struct \
+ mcp_public_data, \
+ sections[_section]))
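+
+/* Worked example (illustrative): for a raw offsize value of 0x00400123,
+ * SECTION_OFFSET() yields 0x123 << 2 = 0x48c bytes, QED_SECTION_SIZE()
+ * yields 0x40 << 2 = 0x100 bytes, and so SECTION_ADDR(0x00400123, 2) is
+ * MCP_REG_SCRATCH + 0x48c + 2 * 0x100 = MCP_REG_SCRATCH + 0x68c.
+ */
+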
+/* PHY configuration */
+struct pmm_phy_cfg {
+ u32 speed;
+#define PMM_SPEED_AUTONEG 0
+
+ u32 pause; /* bitmask */
+#define PMM_PAUSE_NONE 0x0
+#define PMM_PAUSE_AUTONEG 0x1
+#define PMM_PAUSE_RX 0x2
+#define PMM_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define PMM_LOOPBACK_NONE 0
+#define PMM_LOOPBACK_INT_PHY 1
+#define PMM_LOOPBACK_EXT_PHY 2
+#define PMM_LOOPBACK_EXT 3
+#define PMM_LOOPBACK_MAC 4
+
+ /* features */
+ u32 feature_config_flags;
+};
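+
+/* Usage sketch (illustrative only; how a driver fills this in is an
+ * assumption, not defined here): requesting autonegotiated link with
+ * full flow control might look like
+ *
+ *	struct pmm_phy_cfg phy_cfg = { 0 };
+ *
+ *	phy_cfg.speed = PMM_SPEED_AUTONEG;
+ *	phy_cfg.pause = PMM_PAUSE_AUTONEG | PMM_PAUSE_RX | PMM_PAUSE_TX;
+ *	phy_cfg.loopback_mode = PMM_LOOPBACK_NONE;
+ */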
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct pmm_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct pmm_stats pmm;
+};
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+/**************************************
+* LLDP and DCBX HSI structures
+**************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+
+enum lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC 4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+/* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+ u32 max_path;
+#define MAX_PATH_BIG_BEAR 2
+#define MAX_PATH_K2 1
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Shared Memory 2 Region *
+****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* To maintain endianness in the mailbox HSI, we want to keep using u32. */
+/* The FW must have the VF acks right after the PF acks, since that is how */
+/* it accesses the arrays (it always expects the VF to reside after the */
+/* PF, which makes the calculation much easier for it). */
+/* To satisfy both constraints and keep the struct small, the code will */
+/* abuse the structure defined here to achieve the actual partitioning */
+/* described above. */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Driver <-> FW Mailbox *
+****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP \
+ 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED \
+ 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT \
+ 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
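+
+/* Illustrative decode (not part of the HSI): speed/duplex is a 4-bit
+ * field at bit 1, so e.g.
+ *
+ *	if ((link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
+ *	    LINK_STATUS_SPEED_AND_DUPLEX_40G)
+ *
+ * matches a raw field value of 5 (i.e. 5 << 1 within the dword).
+ */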
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN BIT(0)
+#define LFA_FORCE_INIT BIT(1)
+#define LFA_LOOPBACK_MISMATCH BIT(2)
+#define LFA_SPEED_MISMATCH BIT(3)
+#define LFA_FLOW_CTRL_MISMATCH BIT(4)
+#define LFA_ADV_SPEED_MISMATCH BIT(5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ u32 link_change_count;
+
+ /* LLDP params */
+ struct lldp_config_params_s lldp_config_params[
+ LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[
+ LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+};
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 reserved[8];
+
+ u32 config;
+
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+ /* MINBW, MAXBW */
+ /* value range - 0..100, increments in 1 % */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+ u32 preserve_data; /* Will be used by CCM */
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+};
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Each driver request to set feature parameters is done using a
+ * different command, which is linked to a specific data structure
+ * from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+union drv_union_data {
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
+
+ struct pmm_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+ /* UNLOAD_DONE_params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+ /* LLDP / DCBX params*/
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+/* configure vf MSIX params*/
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
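+
+/* Usage sketch (illustrative, not a definitive flow): a driver request is
+ * composed as a command code plus a sequence number, e.g.
+ *
+ *	drv_mb_header = DRV_MSG_CODE_LOAD_REQ |
+ *			(seq & DRV_MSG_SEQ_NUMBER_MASK);
+ *
+ * and the MFW response would then be matched by comparing
+ * (fw_mb_header & FW_MSG_CODE_MASK) against e.g. FW_MSG_CODE_DRV_LOAD_PORT
+ * and (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) against the same seq.
+ */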
+
+/* MFW - DRV MB */
+/**********************************************************************
+* Description
+* Incremental Aggregative
+* 8-bit MFW counter per message
+* 8-bit ack-counter per message
+* Capabilities
+* Provides up to 256 aggregative messages per type
+* Provides 4 message types per dword
+* Each message type maps to a byte offset
+* Backward compatibility by using sizeof for the counters
+* No lock required for 32-bit messages
+* Limitations:
+* For messages greater than 32 bits, a dedicated mechanism (e.g. a lock)
+* is required to prevent data corruption.
+**********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
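+
+/* Worked example (illustrative): msg_id 5 lives in msg[5 >> 2] = msg[1]
+ * at bit offset (5 & 0x3) << 3 = 8, so MFW_DRV_MSG_MASK(5) = 0xff00.
+ * With MFW_DRV_MSG_MAX = 7, MFW_DRV_MSG_MAX_DWORDS(7) = ((7 - 1) >> 2) + 1
+ * = 2 dwords for the msg[] and ack[] arrays below.
+ */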
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+struct mcp_public_data {
+ /* The sections field is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+ struct drv_ver_info_stc drv_info;
+};
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
+
+/******************************************
+* nvm_cfg1 structs
+******************************************/
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+
+ u32 engineering_change[3]; /* 0x4 */
+
+ u32 manufacturing_id; /* 0x10 */
+
+ u32 serial_number[4]; /* 0x14 */
+
+ u32 pcie_cfg; /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+
+ u32 mgmt_traffic; /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
+#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+
+ u32 core_cfg; /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+
+ u32 e_lane_cfg1; /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 e_lane_cfg2; /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET 12
+#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+
+ u32 f_lane_cfg1; /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 f_lane_cfg2; /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+
+ u32 eagle_preemphasis; /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 eagle_driver_current; /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 falcon_driver_current; /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 pci_id; /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+
+ u32 pci_subsys_id; /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+
+ u32 bar; /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+
+ u32 eagle_txfir_main; /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 eagle_txfir_post; /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 falcon_txfir_main; /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 falcon_txfir_post; /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 manufacture_ver; /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+
+ u32 manufacture_time; /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+
+ u32 led_global_settings; /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+
+ u32 generic_cont1; /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+
+ u32 mbi_version; /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date; /* 0x80 */
+
+ u32 misc_sig; /* 0x84 */
+
+ /* Define the GPIO mapping to switch i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+
+ u32 reserved[46]; /* 0x88 */
+};
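+
+/* Illustrative decode (not part of the HSI): the nvm_cfg _MASK values are
+ * pre-shifted, so a field is read by masking first and shifting by its
+ * _OFFSET, e.g. (glob being a hypothetical struct nvm_cfg1_glob pointer):
+ *
+ *	mf_mode = (glob->generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ *		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
+ *
+ * which can then be compared against values such as
+ * NVM_CFG1_GLOB_MF_MODE_NPAR1_0.
+ */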
+
+struct nvm_cfg1_path {
+ u32 reserved[30]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 power_dissipated; /* 0x0 */
+#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
+
+ u32 power_consumed; /* 0x4 */
+#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
+
+ u32 generic_cont0; /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+
+ u32 pcie_cfg; /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+
+ u32 features; /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+
+ u32 speed_cap_mask; /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
+
+ u32 link_settings; /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
+
+ u32 phy_cfg; /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10
+#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
+
+ u32 mgmt_traffic; /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2
+
+ u32 ext_phy; /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+
+ u32 mba_cfg1; /* 0x28 */
+#define NVM_CFG1_PORT_MBA_MASK 0x00000001
+#define NVM_CFG1_PORT_MBA_OFFSET 0
+#define NVM_CFG1_PORT_MBA_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_ENABLED 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED5_2K 0x1
+#define NVM_CFG1_PORT_RESERVED5_4K 0x2
+#define NVM_CFG1_PORT_RESERVED5_8K 0x3
+#define NVM_CFG1_PORT_RESERVED5_16K 0x4
+#define NVM_CFG1_PORT_RESERVED5_32K 0x5
+#define NVM_CFG1_PORT_RESERVED5_64K 0x6
+#define NVM_CFG1_PORT_RESERVED5_128K 0x7
+#define NVM_CFG1_PORT_RESERVED5_256K 0x8
+#define NVM_CFG1_PORT_RESERVED5_512K 0x9
+#define NVM_CFG1_PORT_RESERVED5_1M 0xA
+#define NVM_CFG1_PORT_RESERVED5_2M 0xB
+#define NVM_CFG1_PORT_RESERVED5_4M 0xC
+#define NVM_CFG1_PORT_RESERVED5_8M 0xD
+#define NVM_CFG1_PORT_RESERVED5_16M 0xE
+#define NVM_CFG1_PORT_RESERVED5_32M 0xF
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21
+
+ u32 mba_cfg2; /* 0x2C */
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0
+#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000
+#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16
+
+ u32 vf_cfg; /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED6_4K 0x1
+#define NVM_CFG1_PORT_RESERVED6_8K 0x2
+#define NVM_CFG1_PORT_RESERVED6_16K 0x3
+#define NVM_CFG1_PORT_RESERVED6_32K 0x4
+#define NVM_CFG1_PORT_RESERVED6_64K 0x5
+#define NVM_CFG1_PORT_RESERVED6_128K 0x6
+#define NVM_CFG1_PORT_RESERVED6_256K 0x7
+#define NVM_CFG1_PORT_RESERVED6_512K 0x8
+#define NVM_CFG1_PORT_RESERVED6_1M 0x9
+#define NVM_CFG1_PORT_RESERVED6_2M 0xA
+#define NVM_CFG1_PORT_RESERVED6_4M 0xB
+#define NVM_CFG1_PORT_RESERVED6_8M 0xC
+#define NVM_CFG1_PORT_RESERVED6_16M 0xD
+#define NVM_CFG1_PORT_RESERVED6_32M 0xE
+#define NVM_CFG1_PORT_RESERVED6_64M 0xF
+
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+
+ u32 led_port_settings; /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
+
+ u32 transceiver_00; /* 0x40 */
+
+	/* Defines the mapping of the transceiver module-absent signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Define the GPIO mux settings to switch i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+
+ u32 reserved[133]; /* 0x44 */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+
+ u32 rsrv1; /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+
+ u32 rsrv2; /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+
+ u32 device_id; /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16
+
+ u32 cmn_cfg; /* 0x14 */
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+
+ u32 pci_cfg; /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+
+ u32 reserved[9]; /* 0x2C */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+* nvm_cfg structs
+******************************************/
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
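+/* TO_OFFSIZE() packs an offset and a size, both converted from bytes to
+ * dwords, into a single dword via the OFFSIZE_* shift constants.
+ */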
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define NVM_CRC_SIZE (sizeof(u32))
+
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+/****************************************************************************
+* Boot Strap Region *
+****************************************************************************/
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len; /* boot code length (in dwords) */
+ u32 code_start_addr;
+ u32 crc; /* 32-bit CRC */
+};
+
+/****************************************************************************
+* Directories Region *
+****************************************************************************/
+struct nvm_code_entry {
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+ u32 len; /* Include CRC */
+ u32 sram_start_addr;
+ u32 sram_run_addr; /* Relevant in case of MIM only */
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 200
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+
+#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
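+/* A seq of all-ones (e.g. freshly erased flash) marks the dir as invalid */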
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ (_num_images - \
+ 1) * sizeof(struct nvm_code_entry) + \
+ NVM_CRC_SIZE)
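+/* NVM_DIR_SIZE(n) = sizeof(struct nvm_dir), which already includes one
+ * code entry, plus (n - 1) further entries plus a trailing CRC dword.
+ */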
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ /* This array length depends on the number of VPD fields */
+ u8 vpd_data[1];
+};
+
+/****************************************************************************
+* NVRAM FULL MAP *
+****************************************************************************/
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4KB */
+#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2MB */
+#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 100KB */
+
+#define LIM_MAX_SIZE ((2 * \
+ FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) - \
+ NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
+ FPGA_MIM_MAX_SIZE)
+#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
+ ((idx == \
+ NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
+#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
+ MIM_MAX_SIZE(is_asic) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+/* Address
+ * +-------------------+ 0x000000
+ * | Bootstrap: |
+ * | magic_number |
+ * | sram_start_addr |
+ * | code_len |
+ * | code_start_addr |
+ * | crc |
+ * +-------------------+ 0x000014
+ * | rsrv |
+ * +-------------------+ 0x000040
+ * | LIM |
+ * +-------------------+ 0x002000
+ * | Dir1 |
+ * +-------------------+ 0x003000
+ * | Dir2 |
+ * +-------------------+ 0x004000
+ * | MIM1 |
+ * +-------------------+ 0x130000
+ * | MIM2 |
+ * +-------------------+ 0x25C000
+ * | Rest Images: |
+ * | TIM1/2 |
+ * | MFW_TRACE1/2 |
+ * | Eagle/Falcon FW |
+ * | PCIE/AVS FW |
+ * | MBA/CCM/L2B |
+ * | VPD |
+ * | optic_modules |
+ * | ... |
+ * +-------------------+ 0x400000
+ */
+struct nvm_image {
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+ /* NVM Offset (size) */
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+
+ /* MIM1_IMAGE 0x004000 (0x12c000) */
+ /* MIM2_IMAGE 0x130000 (0x12c000) */
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+}; /* 0x134 */
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
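+/* NVM_OFFSET() is an offsetof()-style helper over the fixed layout above,
+ * e.g. NVM_OFFSET(dir[0]) evaluates to 0x2000, matching 'Dir1' in the map.
+ */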
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+
+ /* This array length depends on the no_hw_sets */
+ struct hw_set_info hw_sets[1];
+};
+
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_hw.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644
index 000000000..ffa99273b
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -0,0 +1,776 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
+
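+/* A PTT entry describes one runtime-configurable window in the external
+ * BAR through which an arbitrary GRC address range can be accessed.
+ */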
+struct qed_ptt {
+ struct list_head list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+};
+
+struct qed_ptt_pool {
+ struct list_head free_list;
+ spinlock_t lock; /* ptt synchronized access */
+ struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+ GFP_ATOMIC);
+ int i;
+
+ if (!p_pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ if (i >= RESERVED_PTT_MAX)
+ list_add(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ spin_lock_init(&p_pool->lock);
+
+ return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+ }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ unsigned int i;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+ struct qed_ptt, list_entry);
+ list_del(&p_ptt->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+ return p_ptt;
+ }
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+ usleep_range(1000, 2000);
+ }
+
+ DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+ return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+ list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
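+
+/* Typical usage (sketch; 'some_reg_addr' is a placeholder):
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		val = qed_rd(p_hwfn, p_ptt, some_reg_addr);
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}
+ */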
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+	/* The HW returns the offset in dwords - translate it to bytes */
+ return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+	/* Update PTT entry in admin window */
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+	/* The HW expects the offset in dwords while the address is in bytes */
+ p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, offset),
+ le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, u32 val)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+ u32 val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+ return val;
+}
+
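+/* Copy 'n' bytes between a host buffer and the BAR in chunks of at most
+ * one window size, sliding the PTT window as needed. The inner loops move
+ * whole dwords, so 'n' is assumed to be dword-aligned.
+ */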
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr,
+ size_t n,
+ bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ size_t quota, done = 0;
+ u32 __iomem *reg_addr;
+
+ while (done < n) {
+ quota = min_t(size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+ done += quota;
+ }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest, u32 hw_addr, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, void *src, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+	/* Every pretend undoes previous pretends, including
+ * previous port pretend.
+ */
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct qed_dmae_params *p_params)
+{
+ u32 opcode = 0;
+ u16 opcodeB = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) <<
+ DMAE_CMD_SRC_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT);
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) <<
+ DMAE_CMD_DST_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT);
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+ opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+ opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+ opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+ /* reset source address in next go */
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ /* reset dest address in next go */
+ opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+ DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+ opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+
+ opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
+ DMAE_CMD_DST_VF_ID_SHIFT);
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ int qed_status = 0;
+
+ /* verify address is not NULL */
+ if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+ ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ DP_NOTICE(p_hwfn,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+ /* Copy the command to DMAE - need to do it before every call
+ * for source/dest address no reset.
+	 * The first 9 DWs are the command registers, the 10th DW is the
+ * GO register, and the rest are result registers
+ * (which are read only by the client).
+ */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)command) + i) : 0;
+
+ qed_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ qed_dmae_idx_to_go_cmd(idx_cmd),
+ DMAE_GO_VALUE);
+
+ return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_addr,
+ GFP_KERNEL);
+ if (!*p_comp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_addr, GFP_KERNEL);
+ if (!*p_cmd) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_addr, GFP_KERNEL);
+ if (!*p_buff) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ goto err;
+ }
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return 0;
+err:
+ qed_dmae_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+ /* Just make sure no one is in the middle */
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys);
+ p_hwfn->dmae_info.p_completion_word = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys);
+ p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+ }
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+ u32 wait_cnt = 0;
+ u32 wait_cnt_limit = 10000;
+
+ int qed_status = 0;
+
+ barrier();
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ udelay(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ qed_status = -EBUSY;
+ break;
+ }
+
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ barrier();
+ }
+
+ if (qed_status == 0)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 length)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ int qed_status = 0;
+
+ switch (src_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+ break;
+ /* for virtual source addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(uintptr_t)src_addr,
+ length * sizeof(u32));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dst_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+ break;
+	/* for virtual destination addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd->length = cpu_to_le16((u16)length);
+
+ qed_dmae_post_command(p_hwfn, p_ptt);
+
+ qed_status = qed_dmae_operation_wait(p_hwfn);
+
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+ src_addr,
+ dst_addr,
+ length);
+ return qed_status;
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+ memcpy((void *)(uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length * sizeof(u32));
+
+ return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr, u64 dst_addr,
+ u8 src_type, u8 dst_type,
+ u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ int qed_status = 0;
+ u32 offset = 0;
+
+ qed_dmae_opcode(p_hwfn,
+ (src_type == QED_DMAE_ADDRESS_GRC),
+ (dst_type == QED_DMAE_ADDRESS_GRC),
+ p_params);
+
+ cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+	/* Split the transfer into cnt_split chunks of length_limit dwords
+	 * plus a final chunk of length_mod dwords.
+	 */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == QED_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status,
+ src_addr,
+ dst_addr,
+ length_cur);
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_GRC,
+ size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
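+
+/* Example (sketch; 'buf' and 'some_grc_addr' are placeholders): DMA 16
+ * dwords from a host buffer to the GRC:
+ *
+ *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
+ *			       some_grc_addr, 16, 0);
+ */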
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *p_params)
+{
+ u16 pq_id = 0;
+
+ if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
+ !p_params) {
+ DP_NOTICE(p_hwfn,
+ "Protocol %d received NULL PQ params\n",
+ proto);
+ return 0;
+ }
+
+ switch (proto) {
+ case PROTOCOLID_CORE:
+ if (p_params->core.tc == LB_TC)
+ pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
+ case PROTOCOLID_ETH:
+ pq_id = p_params->eth.tc;
+ break;
+ default:
+ pq_id = 0;
+ }
+
+ pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+ return pq_id;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_hw.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644
index 000000000..e56d43379
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -0,0 +1,263 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+
+#define DMAE_CMD_SIZE 14
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return int - success (0), negative - error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free - Free the PTT pool
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param new_hw_addr
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ *
+ * @return u32
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ size_t n);
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ *              either a pf or a vf; port/path fields are don't-care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map the idx to the DMAE 'go' command
+ * register address; declared here since other files require it.
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *params);
+
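+/* Example (sketch): resolve the Tx PQ for an ETH queue on TC 0:
+ *
+ *	union qed_qm_pq_params pq_params = { .eth = { .tc = 0 } };
+ *	u16 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ */
+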
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *fw_data);
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644
index 000000000..0b21a553c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -0,0 +1,798 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+ MCM_SEC,
+ MCM_PRI,
+ UCM_SEC,
+ UCM_PRI,
+ TCM_SEC,
+ TCM_PRI,
+ YCM_SEC,
+ YCM_PRI,
+ XCM_SEC,
+ XCM_PRI,
+ NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, \
+ 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
+ 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID 0xffff
+/* feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL 4375000
+#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+/* RL constants */
+#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_PERIOD 5 /* in us */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) max_t(u32, \
+ (((rate ? rate : 1000000) \
+ * QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL 4375000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define EAGLE_WORKAROUND_TC 7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
+ 4) * \
+ 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_EAGLE_WORKAROUND_BLOCKS 4
+#define BTB_PURE_LB_FACTOR 10
+#define BTB_PURE_LB_RATIO 7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 0x2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
+ _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, \
+ value) SET_FIELD(var[cmd ## _ ## field ## \
+ _OFFSET], \
+ cmd ## _ ## field, \
+ value)
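+/* e.g. QM_CMD_SET_FIELD(cmd, QM_STOP_CMD, PQ_TYPE, 1) expands to
+ * SET_FIELD(cmd[QM_STOP_CMD_PQ_TYPE_OFFSET], QM_STOP_CMD_PQ_TYPE, 1)
+ */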
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
+ (max_phy_tcs_pr_port) \
+ + (tc))
+#define LB_VOQ(port) ( \
+ MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port) \
+ ((tc) < \
+ LB_TC ? PHYS_VOQ(port, \
+ tc, \
+ max_phy_tcs_pr_port) \
+ : LB_VOQ(port))
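+/* e.g. with max_phy_tcs_pr_port = 4 (illustrative), VOQ(1, 2, 4) selects
+ * physical VOQ 1 * 4 + 2 = 6, while TCs >= LB_TC map to LB_VOQ(port)
+ */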
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
+ bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ /* enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (1 << MAX_NUM_VOQS) - 1);
+ /* write RL period */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
+ bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
+ bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
+ bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 voq,
+ u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+
+	/* In A0 - limit the size of the pbf queue so that it can hold at
+	 * most 511 commands of the minimum size of 4 (the FCoE minimum size)
+ */
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+ if (is_bb_a0)
+ cmdq_lines = min_t(u32, cmdq_lines, 1022);
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id;
+
+ /* clear PBF lines for all VOQs */
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active) {
+ u16 phys_lines, phys_lines_per_tc;
+ u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* find #lines to divide between the active
+ * physical TCs.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+ /* find #lines per active physical TC */
+ phys_lines_per_tc = phys_lines / phys_tcs;
+ /* init registers per active TC */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+ /* init registers for pure LB TC */
+ qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+ }
+}
+
+static void qed_btb_blocks_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u32 temp;
+ u8 phys_tcs;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+		/* find blocks per physical TC. use a factor to avoid
+		 * floating-point arithmetic.
+ */
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (phys_tcs * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+
+ /* init physical TCs */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+
+ /* init pure LB TC */
+ temp = LB_VOQ(port_id);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ pure_lb_blocks);
+ }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+ u16 i, pq_id, pq_group;
+
+ /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(p_params->pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+ /* go over all Tx PQs */
+ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ bool is_vf_pq = (i >= p_params->num_pf_pqs);
+ struct qm_rf_pq_map tx_pq_map;
+
+ /* update first Tx PQ of VPORT/TC */
+ u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+ p_params->start_vport;
+ u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+ u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ /* create new VP PQ */
+ pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+ /* map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id,
+ (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id <<
+ QM_WFQ_VP_PQ_PF_SHIFT));
+ }
+ /* fill PQ map entry */
+ memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+ is_vf_pq ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+ is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+ p_params->pq_params[i].wrr_group);
+ /* write PQ map entry to CAM */
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+ *((u32 *)&tx_pq_map));
+ /* set base address */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ /* check if VF PQ */
+ if (is_vf_pq) {
+ /* if PQ is associated with a VF, add indication
+ * to PQ VF mask
+ */
+ tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+ (1 << (pq_id % tx_pq_vf_mask_width));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++) {
+ if (tx_pq_vf_mask[i]) {
+ if (is_bb_a0) {
+ u32 curr_mask = 0, addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
+ if (!p_params->is_first_pf)
+ curr_mask = qed_rd(p_hwfn, p_ptt,
+ addr);
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+
+ STORE_RT_REG(p_hwfn, addr,
+ curr_mask | tx_pq_vf_mask[i]);
+ } else {
+ u32 addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
+ }
+ }
+ }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
+{
+ u16 i, pq_id;
+
+ /* a single other PQ group is used in each PF,
+ * where PQ group i is used in PF i.
+ */
+ u16 pq_group = pf_id;
+ u32 pq_size = num_pf_cids + num_tids;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+ /* set base address */
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u32 crd_reg_offset;
+ u32 inc_val;
+ u16 i;
+
+ if (p_params->pf_id < MAX_NUM_PFS_BB)
+ crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+ else
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
+
+ inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+
+ OVERWRITE_RT_REG(p_hwfn,
+ crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 tc, i, vport_id;
+ u32 inc_val;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
+ u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ /* each VPORT can have several VPORT PQ IDs for
+ * different TCs
+ */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = pq_ids[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ STORE_RT_REG(p_hwfn, temp + vport_pq_id,
+ QM_WFQ_UPPER_BOUND |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+ }
+ }
+
+ return 0;
+}
+
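+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */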
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ i++) {
+ udelay(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+	/* check whether we timed out waiting for the SDM command ready signal */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Timeout when waiting for QM SDM command ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
+{
+ if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
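+	/* pulse the GO bit (write 1 then 0) to kick off the command */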
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
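+
+/* Worked example (hypothetical figures): with num_pf_cids = 64,
+ * num_vf_cids = 0, num_tids = 64, num_pf_pqs = 4 and num_vf_pqs = 0,
+ * the PF needs QM_PQ_MEM_4KB(64) * 4 +
+ * QM_PQ_MEM_4KB(128) * QM_OTHER_PQS_PER_PF units of 4KB PQ memory.
+ */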
+
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
+{
+ /* init AFullOprtnstcCrdMask */
+ u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (p_params->pf_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (p_params->vport_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (p_params->pf_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (p_params->vport_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+ qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+ qed_cmdq_lines_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ qed_btb_blocks_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+ p_params->num_tids) *
+ QM_OTHER_PQS_PER_PF;
+ u8 tc, i;
+
+ /* clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < p_params->num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* map Other PQs (if any) */
+ qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+ p_params->num_pf_cids, p_params->num_tids, 0);
+
+ /* map Tx PQs */
+ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+ if (p_params->pf_wfq)
+ if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+ return -1;
+
+ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+ return -1;
+
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLPFCRD + pf_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+ return 0;
+}
+
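+/* Usage sketch (illustrative, not part of this patch): pause Tx PQs
+ * 0..63 before tearing them down, then release them afterwards:
+ *
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
+ *		return -EBUSY;
+ *	... teardown ...
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64))
+ *		return -EBUSY;
+ */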
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+ /* set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* if last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+ cmd_arr[0], cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644
index 000000000..796f1390e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -0,0 +1,531 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+ cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data[i].b_valid = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val)
+{
+ p_hwfn->rt_data[rt_offset].init_val = val;
+ p_hwfn->rt_data[rt_offset].b_valid = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data[rt_offset + i].init_val = val[i];
+ p_hwfn->rt_data[rt_offset + i].b_valid = true;
+ }
+}
+
+static void qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 rt_offset,
+ u32 size)
+{
+ struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++) {
+ if (!rt_data[i].b_valid)
+ continue;
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+ }
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rt_data *rt_data;
+
+	rt_data = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(*rt_data), GFP_ATOMIC);
+ if (!rt_data)
+ return -ENOMEM;
+
+ p_hwfn->rt_data = rt_data;
+
+ return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->rt_data);
+ p_hwfn->rt_data = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size,
+ const u32 *buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ int rc = 0;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+ const u32 *data = buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(buf + dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+	/* invoke the DMAE host-to-GRC API with:
+	 * 1. the DMAE init channel
+	 * 2. addr
+	 * 3. zero_buffer (source data, replicated via REPL_SRC)
+	 * 4. fill_count
+	 */
+
+ return qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(&zero_buffer[0]),
+ addr, fill_count,
+ QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+ u32 offset, output_len, input_len, max_size;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ int rc = 0;
+ u32 size;
+
+ array_data = cdev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)(array_data +
+ dmae_array_offset);
+ data = le32_to_cpu(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data,
+ INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ memset(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = qed_unzip_data(p_hwfn, input_len,
+ (u8 *)&array_data[offset],
+ max_size, (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+ rc = -EINVAL;
+ }
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
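+
+/* Note on the three INIT_ARR_* layouts handled above: ZIPPED carries a
+ * compressed blob that is inflated into unzip_buf before being written,
+ * PATTERN repeats a single block of 'size' dwords 'repeats' times at
+ * consecutive addresses, and STANDARD is a plain dword array written once.
+ */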
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ union init_write_args *arg = &cmd->args;
+ int rc = 0;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn,
+ "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+ addr);
+ return -EINVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ qed_wr(p_hwfn, p_ptt, addr,
+ le32_to_cpu(arg->inline_val));
+ break;
+ case INIT_SRC_ZEROS:
+ if (b_must_dmae ||
+ (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ else
+ qed_init_fill(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ break;
+ case INIT_SRC_ARRAY:
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ qed_init_rt(p_hwfn, p_ptt, addr,
+ le16_to_cpu(arg->runtime.offset),
+ le16_to_cpu(arg->runtime.size));
+ break;
+ }
+
+ return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+ return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
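+
+/* These comparators implement the INIT_COMPARISON_* poll modes: EQ
+ * waits for an exact value, AND waits until all expected bits are set,
+ * and OR waits until the OR of the read value with the expected value
+ * is non-zero.
+ */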
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_read_op *cmd)
+{
+ u32 data = le32_to_cpu(cmd->op_data);
+ u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+
+ bool (*comp_check)(u32 val,
+ u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ data = le32_to_cpu(cmd->op_data);
+ if (GET_FIELD(data, INIT_READ_OP_POLL)) {
+ int i;
+
+ switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
+ case INIT_COMPARISON_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_COMPARISON_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_COMPARISON_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ comp_check = NULL;
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ data);
+ return;
+ }
+
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT &&
+ !comp_check(val, le32_to_cpu(cmd->expected_val));
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == QED_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn,
+			       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, data);
+ }
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+ u16 *offset,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = cdev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
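+
+/* Illustration (hypothetical tree bytes): the prefix expression
+ * { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + a, MAX_INIT_MODE_OPS + b }
+ * evaluates to 1 only when mode bits a and b are both set in 'modes';
+ * any byte >= MAX_INIT_MODE_OPS is a leaf naming a mode bit.
+ */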
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd,
+ int modes)
+{
+ u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+ if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+ struct init_if_phase_op *p_cmd,
+ u32 phase,
+ u32 phase_id)
+{
+ u32 data = le32_to_cpu(p_cmd->phase_data);
+ u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
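+
+/* Both helpers above return the number of following commands to skip
+ * when their condition does not match, and 0 otherwise; qed_init_run()
+ * adds the returned value to its command index.
+ */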
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+ int rc = 0;
+
+ num_init_ops = cdev->fw_data->init_ops_size;
+ init_ops = cdev->fw_data->init_ops;
+
+ p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ return -ENOMEM;
+ }
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = le32_to_cpu(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+ case INIT_OP_READ:
+ qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+ case INIT_OP_IF_MODE:
+ cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ phase, phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* qed_init_run is always invoked from
+ * sleep-able context
+ */
+ udelay(le32_to_cpu(cmd->delay.delay));
+ break;
+
+ case INIT_OP_CALLBACK:
+ qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ kfree(p_hwfn->unzip_buf);
+ return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
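+/* The firmware data blob is assumed to begin with an array of
+ * struct bin_buffer_hdr ({offset, length} pairs) indexed by the
+ * BIN_BUF_INIT_* constants, followed by the buffers themselves.
+ */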
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *data)
+{
+ struct qed_fw_data *fw = cdev->fw_data;
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(cdev, "Invalid fw data\n");
+ return -EINVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)data;
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+ return 0;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644
index 000000000..1e8320499
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return int
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * @brief qed_init_alloc - Allocate the runtime (RT) array and store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_free - Free the runtime (RT) array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief qed_init_store_rt_agg - Store multiple configuration values in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
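+
+/* Example (mirroring the CAU setup path): store a whole structure into
+ * consecutive RT entries:
+ *
+ *	struct cau_sb_entry sb_entry;
+ *	...
+ *	STORE_RT_REG_AGG(hwfn, CAU_REG_SB_VAR_MEMORY_RT_OFFSET, sb_entry);
+ */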
+
+/**
+ * @brief qed_gtt_init - Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_int.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644
index 000000000..9cc9d62c1
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -0,0 +1,1144 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+struct qed_pi_info {
+ qed_int_comp_cb_t comp_cb;
+ void *cookie;
+};
+
+struct qed_sb_sp_info {
+ struct qed_sb_info sb_info;
+
+ /* per protocol index data */
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct qed_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+ struct qed_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0;
+ u16 index;
+
+	/* Make certain HW write took effect */
+ mmiowb();
+
+ index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = QED_SB_ATT_IDX;
+ }
+
+ /* Make certain we got a consistent view with HW */
+ mmiowb();
+
+ return rc;
+}
+
+/**
+ * @brief qed_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return 0;
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_mask;
+
+ if (deasserted_bits != 0x100)
+ DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+
+ /* Clear IGU indication for the deasserted bits */
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return 0;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u32 attn_bits = 0, attn_acks = 0;
+ u16 asserted_bits, deasserted_bits;
+ __le16 index;
+ int rc = 0;
+
+ /* Read current attention bits/acks - safeguard against attentions
+	 * by guaranteeing work on a synchronized timeframe
+ */
+ do {
+ index = p_sb_attn->sb_index;
+ attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+ attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+ } while (index != p_sb_attn->sb_index);
+ p_sb_attn->sb_index = index;
+
+	/* Assertion / deassertion are meaningful (and in correct state)
+	 * only when they differ and are consistent with the known state:
+	 * deassertion when previous attention & current ack, assertion
+	 * when current attention with no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ } else if (asserted_bits == 0x100) {
+ DP_INFO(p_hwfn,
+ "MFW indication via attention\n");
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication [deassertion]\n");
+ }
+
+ if (asserted_bits) {
+ rc = qed_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits) {
+ rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+ void __iomem *igu_addr,
+ u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+	/* Both segments (interrupts & acks) are written to the same address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ mmiowb();
+ barrier();
+}
+
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_pi_info *pi_info = NULL;
+ struct qed_sb_attn_info *sb_attn;
+ struct qed_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->cdev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn\n");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+	/* Disable ack for def status block. Required both for MSI-X and
+	 * for INTA in non-mask mode; in INTA it does no harm.
+ */
+ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(
+ p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+
+ rc = qed_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(
+ p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+	/* Check if we expect interrupts at this time; if not, just ack them */
+ if (!(rc & QED_SB_EVENT_MASK)) {
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+	/* Check the validity of the DPC ptt; if invalid, ack interrupts and fail */
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & QED_SB_ATT_IDX)
+ qed_int_attentions(p_hwfn);
+
+ if (rc & QED_SB_IDX) {
+ int pi;
+
+		/* Call the completion callback of each registered protocol index */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & QED_SB_ATT_IDX))
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (p_sb) {
+ if (p_sb->sb_attn)
+ dma_free_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb;
+ void *p_virt;
+ dma_addr_t p_phys = 0;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
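+
+/* Example: with the default timer resolution (QED_CAU_DEF_RX_TIMER_RES
+ * == 0) and rx_coalesce_usecs == 24, timeset = 24 >> (0 + 1) = 12 and
+ * the resulting coalescing timeout is 12 << (0 + 1) = 24 usec.
+ */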
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ u32 cau_state;
+
+ memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+	/* set the timer resolution to a fixed value */
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+ QED_CAU_DEF_RX_TIMER_RES);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+ QED_CAU_DEF_TX_TIMER_RES);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_hwfn->cdev->rx_coalesce_usecs)
+ p_hwfn->cdev->rx_coalesce_usecs =
+ QED_CAU_DEF_RX_USECS;
+ if (!p_hwfn->cdev->tx_coalesce_usecs)
+ p_hwfn->cdev->tx_coalesce_usecs =
+ QED_CAU_DEF_TX_USECS;
+ }
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+ u32 val;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
+ upper_32_bits(sb_phys));
+
+ val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
+ (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 num_tc = 1, i;
+
+ qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ QED_COAL_RX_STATE_MACHINE,
+ timeset);
+
+ timeset = p_hwfn->cdev->tx_coalesce_usecs >>
+ (QED_CAU_DEF_TX_TIMER_RES + 1);
+
+ for (i = 0; i < num_tc; i++) {
+ qed_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ QED_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset;
+ u32 pi_offset;
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id return the
+ * igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ u16 igu_sb_id;
+
+	/* Assuming a contiguous set of IGU SBs dedicated to the given PF */
+ if (sb_id == QED_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else
+ igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+ (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+ return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != QED_SP_SB_ID) {
+ p_hwfn->sbs_info[sb_id] = sb_info;
+ p_hwfn->num_sbs++;
+ }
+
+ sb_info->cdev = p_hwfn->cdev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+
+ sb_info->flags |= QED_SB_INFO_INIT;
+
+ qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_id == QED_SP_SB_ID) {
+		DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
+ return -EINVAL;
+ }
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+
+ return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (p_sb) {
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+ p_phys, QED_SP_SB_ID);
+
+ memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return 0;
+}
+
+static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ if (!p_hwfn)
+ return;
+
+ if (p_hwfn->p_sp_sb)
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup Slow path status block - NULL pointer\n");
+
+ if (p_hwfn->p_sb_attn)
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup attentions status block - NULL pointer\n");
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ qed_status = 0;
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+ qed_status = 0;
+ }
+
+ return qed_status;
+}
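+
+/* Usage sketch (my_comp_cb and my_cookie are illustrative names): a
+ * protocol registers once for a PI slot and unregisters on teardown:
+ *
+ *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
+ *				 &sb_idx, &p_fw_cons);
+ *	...
+ *	qed_int_unregister_cb(p_hwfn, sb_idx);
+ */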
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+ p_hwfn->cdev->int_mode = int_mode;
+ switch (p_hwfn->cdev->int_mode) {
+ case QED_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case QED_INT_MODE_POLL:
+ break;
+ }
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+	int rc = 0, i;
+
+ /* Mask non-link attentions */
+ for (i = 0; i < 9; i++)
+ qed_wr(p_hwfn, p_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+
+ /* Configure AEU signal change to produce attentions for link */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+
+ /* Flush the writes to IGU */
+ mmiowb();
+
+ /* Unmask AEU signals toward IGU */
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+ if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_slowpath_irq_req(p_hwfn);
+ if (rc != 0) {
+ DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+ return -EINVAL;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid
+ )
+{
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u32 data = 0;
+ u32 cmd_ctrl = 0;
+ u32 val = 0;
+ u32 sb_bit = 0;
+ u32 sb_bit_addr = 0;
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ barrier();
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* Flush the write to IGU */
+ mmiowb();
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (sb_id % 32);
+ sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+ /* Now wait for the command to complete */
+ do {
+ val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+
+ usleep_range(5000, 10000);
+ } while (--sleep_cnt);
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set)
+{
+ int pi;
+
+ /* Set */
+ if (b_set)
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+ /* Clear */
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath)
+{
+ u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 sb_id = 0;
+ u32 val = 0;
+
+ val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning SBs [%d,...,%d]\n",
+ igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+ for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+
+ if (b_slowpath) {
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+ }
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *blk;
+ u32 val;
+ u16 sb_id;
+ u16 prev_sb_id = 0xFF;
+
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+
+ if (!p_hwfn->hw_info.p_igu_info)
+ return -ENOMEM;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Initialize base sb / sb cnt for PFs */
+ p_igu_info->igu_base_sb = 0xffff;
+ p_igu_info->igu_sb_cnt = 0;
+ p_igu_info->igu_dsb_id = 0xffff;
+ p_igu_info->igu_base_sb_iov = 0xffff;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+
+ /* stop scanning when hit first invalid PF entry */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ break;
+
+ blk->status = QED_IGU_STATUS_VALID;
+ blk->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ blk->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
+ val, blk->function_id, blk->is_pf,
+ blk->vector_number);
+
+ if (blk->is_pf) {
+ if (blk->function_id == p_hwfn->rel_pf_id) {
+ blk->status |= QED_IGU_STATUS_PF;
+
+ if (blk->vector_number == 0) {
+ if (p_igu_info->igu_dsb_id == 0xffff)
+ p_igu_info->igu_dsb_id = sb_id;
+ } else {
+ if (p_igu_info->igu_base_sb ==
+ 0xffff) {
+ p_igu_info->igu_base_sb = sb_id;
+ } else if (prev_sb_id != sb_id - 1) {
+ DP_NOTICE(p_hwfn->cdev,
+ "consecutive igu vectors for HWFN %x broken",
+ p_hwfn->rel_pf_id);
+ break;
+ }
+ prev_sb_id = sb_id;
+ /* we don't count the default */
+ (p_igu_info->igu_sb_cnt)++;
+ }
+ }
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+
+ if (p_igu_info->igu_base_sb == 0xffff ||
+ p_igu_info->igu_dsb_id == 0xffff ||
+ p_igu_info->igu_sb_cnt == 0) {
+ DP_NOTICE(p_hwfn,
+ "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = 0;
+
+ igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+ u64 intr_status = 0;
+ u32 intr_status_lo = 0;
+ u32 intr_status_hi = 0;
+ u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+ u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ lsb_igu_cmd_addr * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ msb_igu_cmd_addr * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+ tasklet_init(p_hwfn->sp_dpc,
+ qed_int_sp_dpc, (unsigned long)p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+ if (!p_hwfn->sp_dpc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ rc = qed_int_sp_dpc_alloc(p_hwfn);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+ rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+ rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+ return rc;
+ }
+ return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+ qed_int_sp_sb_free(p_hwfn);
+ qed_int_sb_attn_free(p_hwfn);
+ qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sp_dpc_setup(p_hwfn);
+}
+
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks)
+{
+ struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info)
+ return 0;
+
+ if (p_iov_blks)
+ *p_iov_blks = info->free_blks;
+
+ return info->igu_sb_cnt;
+}
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ cdev->hwfns[i].b_int_requested = false;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_int.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644
index 000000000..51e0b09a7
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -0,0 +1,396 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+
+/* Igu control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
+
+enum qed_coalescing_fsm {
+ QED_COAL_RX_STATE_MACHINE,
+ QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * once the structure is initialized, it can be passed to SB-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, its memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param hwfn_cookie - cookie holding a pointer to the hwfn
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ * blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_iov_blks - configured free blks for vfs
+ *
+ * @return int - number of status blocks configured
+ */
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks);
+
+/**
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API needs to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param cdev
+ *
+ */
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX 0x0001
+#define QED_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+ u8 status;
+#define QED_IGU_STATUS_FREE 0x01
+#define QED_IGU_STATUS_VALID 0x02
+#define QED_IGU_STATUS_PF 0x04
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+};
+
+struct qed_igu_map {
+ struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+ struct qed_igu_map igu_map;
+ u16 igu_dsb_id;
+ u16 igu_base_sb;
+ u16 igu_base_sb_iov;
+ u16 igu_sb_cnt;
+ u16 igu_sb_cnt_iov;
+ u16 free_blks;
+};
+
+/* TODO: function names may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from the IGU CAM to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief qed_int_register_cb - Register callback func for
+ * slowhwfn status block.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons);
+
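+/* Usage sketch (illustrative only; the qed_example_* names are hypothetical):
+ * a protocol registers its handler against the slowpath status block and is
+ * then called from the sp DPC whenever that block is updated, e.g.
+ *
+ * static int qed_example_sp_cb(struct qed_hwfn *p_hwfn, void *cookie)
+ * {
+ * struct qed_example_ctx *ctx = cookie;
+ *
+ * return qed_example_process(ctx);
+ * }
+ *
+ * rc = qed_int_register_cb(p_hwfn, qed_example_sp_cb, ctx,
+ * &sb_idx, &p_fw_cons);
+ */
+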
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+ u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param cleanup_set - set(1) / clear(0)
+ * @param opaque_fid - the function for which to perform
+ * cleanup, for example a PF on behalf of
+ * its VFs.
+ */
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid);
+
+/**
+ * @brief Pure runtime initialization of a single status block.
+ * Should be called for each status block that will be used, both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param b_set - set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+ * @return int
+ */
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_l2.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644
index 000000000..f72036a2e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -0,0 +1,1704 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+enum qed_rss_caps {
+ QED_RSS_IPV4 = 0x1,
+ QED_RSS_IPV6 = 0x2,
+ QED_RSS_IPV4_TCP = 0x4,
+ QED_RSS_IPV6_TCP = 0x8,
+ QED_RSS_IPV4_UDP = 0x10,
+ QED_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
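+
+/* Illustrative example (mirrors qed_configure_filter_rx_mode() below):
+ * promiscuous Rx is expressed by accepting unmatched unicast and multicast
+ * on top of the matched filters, e.g.
+ *
+ * flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ * QED_ACCEPT_UCAST_UNMATCHED |
+ * QED_ACCEPT_MCAST_MATCHED |
+ * QED_ACCEPT_MCAST_UNMATCHED |
+ * QED_ACCEPT_BCAST;
+ */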
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_approx_mcast_flg;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+};
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_fid,
+ u16 opaque_fid,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ struct qed_sp_init_request_params params;
+ struct vport_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+ u16 rx_mode = 0;
+ u8 abs_vport_id = 0;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = cpu_to_le16(mtu);
+ p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
+ p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+ /* TPA related fields */
+ memset(&p_ramrod->tpa_param, 0,
+ sizeof(struct eth_vport_tpa_param));
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+ concrete_fid);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_rss_params *p_params)
+{
+ struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+ u16 abs_l2_queue = 0, capabilities = 0;
+ int rc = 0, i;
+
+ if (!p_params) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+ if (rc)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+ rss->update_rss_capabilities = p_params->update_rss_capabilities;
+ rss->update_rss_ind_table = p_params->update_rss_ind_table;
+ rss->update_rss_key = p_params->update_rss_key;
+
+ rss->rss_mode = p_params->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
+
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+ rss->tbl_size = p_params->rss_table_size_log;
+
+ rss->capabilities = cpu_to_le16(capabilities);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ rss->rss_mode, rss->update_rss_capabilities,
+ capabilities, rss->update_rss_ind_table,
+ rss->update_rss_key);
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ rc = qed_fw_l2_queue(p_hwfn,
+ (u8)p_params->rss_ind_table[i],
+ &abs_l2_queue);
+ if (rc)
+ return rc;
+
+ rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+ i, rss->indirection_table[i]);
+ }
+
+ for (i = 0; i < QED_RSS_KEY_SIZE; i++)
+ rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+ return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->rx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->rx_mode.state = 0x%x\n", state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->tx_mode.state = 0x%x\n", state);
+ }
+}
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sp_vport_update_params *p_params)
+{
+ int i;
+
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (p_params->update_approx_mcast_flg) {
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+ __le32 val = cpu_to_le32(p_bins[i]);
+
+ p_ramrod->approx_mcast.bins[i] = val;
+ }
+ }
+}
+
+static int
+qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct qed_sp_init_request_params sp_params;
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_params->opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+ rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc) {
+ /* Return spq entry which is taken in qed_sp_init_request() */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 vport_id)
+{
+ struct qed_sp_init_request_params sp_params;
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+ u8 vport,
+ struct qed_filter_accept_flags accept_flags,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_sp_vport_update_params vport_update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+ }
+
+ return 0;
+}
+
+static int qed_sp_release_queue_cid(
+ struct qed_hwfn *p_hwfn,
+ struct qed_hw_cid_data *p_cid_data)
+{
+ if (!p_cid_data->b_cid_allocated)
+ return 0;
+
+ qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+ p_cid_data->b_cid_allocated = false;
+
+ return 0;
+}
+
+static int
+qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ /* Store information for the stop */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = params->vport_id;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+ if (rc != 0)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, cid, params->queue_id, params->vport_id,
+ params->sb);
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ cid, opaque_fid,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(params->sb);
+ p_ramrod->sb_index = params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
+ p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
+ p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ return rc;
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ struct qed_hw_cid_data *p_rx_cid;
+ u64 init_prod_val = 0;
+ u16 abs_l2_queue = 0;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+ if (rc != 0)
+ return rc;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)(&init_prod_val));
+
+ /* Allocate a CID for the queue */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_rx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_rx_cid->b_cid_allocated = true;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_rx_cid->cid,
+ params,
+ abs_stats_id,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size);
+
+ if (rc != 0)
+ qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u16 abs_rx_q_id = 0;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_rx_cid->cid,
+ p_rx_cid->opaque_fid,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to arrive as an EQE to the PF.
+ */
+ p_ramrod->complete_cqe_flg =
+ (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+ !eq_completion_only) || cqe_completion;
+ p_ramrod->complete_event_flg =
+ !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+ eq_completion_only;
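+
+ /* Illustrative: a PF stopping its own queue (opaque_fid matches) with
+ * eq_completion_only == false yields complete_cqe_flg == 1 and
+ * complete_event_flg == 0, i.e. the completion arrives on the CQE ring.
+ */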
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+static int
+qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_tx_cid;
+ u8 abs_vport_id;
+ int rc = -EINVAL;
+ u16 pq_id;
+
+ /* Store information for the stop */
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ p_tx_cid->cid = cid;
+ p_tx_cid->opaque_fid = opaque_fid;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
+ opaque_fid,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->tc = p_pq_params->eth.tc;
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+ p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+ pq_id = qed_get_qm_pq(p_hwfn,
+ PROTOCOLID_ETH,
+ p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hw_cid_data *p_tx_cid;
+ union qed_qm_pq_params pq_params;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
+ return rc;
+
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+ memset(&pq_params, 0, sizeof(pq_params));
+
+ /* Allocate a CID for the queue */
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_tx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_tx_cid->b_cid_allocated = true;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, p_tx_cid->cid,
+ p_params->queue_id, p_params->vport_id, p_params->sb);
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_tx_cid->cid,
+ p_params,
+ abs_stats_id,
+ pbl_addr,
+ pbl_size,
+ &pq_params);
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+ if (rc)
+ qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id)
+{
+ struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_tx_cid->cid,
+ p_tx_cid->opaque_fid,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case QED_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case QED_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case QED_FILTER_REPLACE:
+ case QED_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REPLACE;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
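+
+/* Illustrative example (not part of the driver): for mac 00:11:22:33:44:55,
+ * the swizzle above stores {0x11, 0x00} in fw_msb, {0x33, 0x22} in fw_mid
+ * and {0x55, 0x44} in fw_lsb; read as little-endian 16-bit values these are
+ * 0x0011, 0x2233 and 0x4455, i.e. (mac[0] << 8) | mac[1] and so on.
+ */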
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct qed_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct qed_sp_init_request_params sp_params;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ enum eth_filter_action action;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(**pp_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, pp_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+ switch (p_filter_cmd->opcode) {
+ case QED_FILTER_FLUSH:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+ case QED_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case QED_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+ case QED_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+ case QED_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+ case QED_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+ case QED_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+ case QED_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+ case QED_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case QED_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+ case QED_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+ qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else {
+ action = qed_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return -EINVAL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id = (p_filter_cmd->opcode ==
+ QED_FILTER_REMOVE) ?
+ vport_to_remove_from :
+ vport_to_add_to;
+ }
+
+ return 0;
+}
+
+static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct eth_filter_cmd_header *p_header;
+ int rc;
+
+ rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "Unicast filter ADD command failed %d\n",
+ rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter,
+ p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0],
+ p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2],
+ p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4],
+ p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates CRC-32 on a buffer
+ * Note: crc32_length MUST be a multiple of 8
+ * Return: the crc32 result; the seed is returned unchanged on invalid input
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length,
+ u32 crc32_seed,
+ u8 complement)
+{
+ u32 byte = 0;
+ u32 bit = 0;
+ u8 msb = 0;
+ u8 current_byte = 0;
+ u32 crc32_result = crc32_seed;
+
+ if ((!crc32_packet) ||
+ (crc32_length == 0) ||
+ ((crc32_length % 8) != 0))
+ return crc32_result;
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1; /*crc32_result[0] = 1;*/
+ }
+ }
+ }
+ return crc32_result;
+}
+
+static inline u32 qed_crc32c_le(u32 seed,
+ u8 *mac,
+ u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+static u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
+ return crc & 0xff;
+}
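+
+/* Illustrative example: the approximate-multicast filter hashes each MAC with
+ * CRC32c and keeps only the low 8 bits, so every address maps to one of 256
+ * bins; distinct MACs may share a bin, which is what makes the filter
+ * approximate. A caller typically does:
+ *
+ * bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ * __set_bit(bit, bins);
+ */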
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc, i;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ } else {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ }
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+ memset(bins, 0, sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ /* The multicast ADD op is an explicit set op; it removes
+ * any existing filters for the vport.
+ */
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, bins);
+ }
+
+ /* Convert to correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)bins;
+ struct vport_update_ramrod_mcast *approx_mcast;
+
+ approx_mcast = &p_ramrod->approx_mcast;
+ approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multi-cast */
+ if (((p_filter_cmd->opcode != QED_FILTER_ADD) &&
+ (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+ return -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_mcast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+ return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+
+ return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info)
+{
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues += FEAT_NUM(&cdev->hwfns[i],
+ QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues = min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_start(p_hwfn,
+ p_hwfn->hw_info.concrete_fid,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id,
+ mtu,
+ drop_ttl0_flg,
+ inner_vlan_removal_en_flg);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ qed_hw_start_fastpath(p_hwfn);
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started V-PORT %d with MTU %d\n",
+ vport_id, mtu);
+ }
+
+ qed_reset_vport_stats(cdev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+ u8 vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+ struct qed_update_vport_params *params)
+{
+ struct qed_sp_vport_update_params sp_params;
+ struct qed_rss_params sp_rss_params;
+ int rc, i;
+
+ if (!cdev)
+ return -ENODEV;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg =
+ params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg =
+ params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+
+ /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns.
+ * We need to re-fix the rss values per engine for CMT.
+ */
+ if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+ struct qed_update_vport_rss_params *rss =
+ &params->rss_params;
+ int k, max = 0;
+
+ /* Find largest entry, since it's possible RSS needs to
+ * be disabled [in case only 1 queue per-hwfn]
+ */
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ max = (max > rss->rss_ind_table[k]) ?
+ max : rss->rss_ind_table[k];
+
+ /* Either fix RSS values or disable RSS */
+ if (cdev->num_hwfns < max + 1) {
+ int divisor = (max + cdev->num_hwfns - 1) /
+ cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - fixing RSS values (modulo %02x)\n",
+ divisor);
+
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ rss->rss_ind_table[k] =
+ rss->rss_ind_table[k] % divisor;
+ } else {
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ params->update_rss_flg = 0;
+ }
+ }
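+
+ /* Worked example (illustrative): with num_hwfns == 2 and a largest
+ * indirection entry of 7, divisor = (7 + 2 - 1) / 2 = 4, so entries
+ * 0..7 are remapped to 0..3 and each engine references only queues it
+ * actually owns.
+ */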
+
+ /* Now, update the RSS configuration for actual configuration */
+ if (params->update_rss_flg) {
+ sp_rss_params.update_rss_config = 1;
+ sp_rss_params.rss_enable = 1;
+ sp_rss_params.update_rss_capabilities = 1;
+ sp_rss_params.update_rss_ind_table = 1;
+ sp_rss_params.update_rss_key = 1;
+ sp_rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+ memcpy(sp_rss_params.rss_ind_table,
+ params->rss_params.rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+ memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+ QED_RSS_KEY_SIZE * sizeof(u32));
+ }
+ sp_params.rss_params = &sp_rss_params;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = qed_sp_vport_update(p_hwfn, &sp_params,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_ERR(cdev, "Failed to update VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+ return 0;
+}
+
+static int qed_start_rxq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ pp_prod);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ params->queue_id, params->rss_id, params->vport_id,
+ params->sb);
+
+ return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+ struct qed_stop_rxq_params *params)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ params->rx_queue_id / cdev->num_hwfns,
+ params->eq_completion_only,
+ false);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = p_params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ p_params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ pbl_addr,
+ pbl_size,
+ pp_doorbell);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, p_params->rss_id, p_params->vport_id,
+ p_params->sb);
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+ qed_hw_stop_fastpath(cdev);
+
+ return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+ struct qed_stop_txq_params *params)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ params->tx_queue_id / cdev->num_hwfns);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qed_filter_accept_flags accept_flags;
+
+ memset(&accept_flags, 0, sizeof(accept_flags));
+
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+ accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+ else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+ return qed_filter_accept_cmd(cdev, 0, accept_flags,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params)
+{
+ struct qed_filter_ucast ucast;
+
+ if (!params->vlan_valid && !params->mac_valid) {
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
+ return -EINVAL;
+ }
+
+ memset(&ucast, 0, sizeof(ucast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ ucast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ ucast.opcode = QED_FILTER_REMOVE;
+ break;
+ case QED_FILTER_XCAST_TYPE_REPLACE:
+ ucast.opcode = QED_FILTER_REPLACE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+ params->type);
+ }
+
+ if (params->vlan_valid && params->mac_valid) {
+ ucast.type = QED_FILTER_MAC_VLAN;
+ ether_addr_copy(ucast.mac, params->mac);
+ ucast.vlan = params->vlan;
+ } else if (params->mac_valid) {
+ ucast.type = QED_FILTER_MAC;
+ ether_addr_copy(ucast.mac, params->mac);
+ } else {
+ ucast.type = QED_FILTER_VLAN;
+ ucast.vlan = params->vlan;
+ }
+
+ ucast.is_rx_filter = true;
+ ucast.is_tx_filter = true;
+
+ return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params)
+{
+ struct qed_filter_mcast mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ mcast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ mcast.opcode = QED_FILTER_REMOVE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+ params->type);
+ }
+
+ mcast.num_mc_addrs = params->num;
+ for (i = 0; i < mcast.num_mc_addrs; i++)
+ ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+ return qed_filter_mcast_cmd(cdev, &mcast,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+ struct qed_filter_params *params)
+{
+ enum qed_filter_rx_mode_type accept_flags;
+
+ switch (params->type) {
+ case QED_FILTER_TYPE_UCAST:
+ return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+ case QED_FILTER_TYPE_MCAST:
+ return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+ case QED_FILTER_TYPE_RX_MODE:
+ accept_flags = params->filter.accept_flags;
+ return qed_configure_filter_rx_mode(cdev, accept_flags);
+ default:
+ DP_NOTICE(cdev, "Unknown filter type %d\n",
+ (int)params->type);
+ return -EINVAL;
+ }
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+ u8 rss_id,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+ cqe);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_eth_dev_info,
+ .register_ops = &qed_register_eth_ops,
+ .vport_start = &qed_start_vport,
+ .vport_stop = &qed_stop_vport,
+ .vport_update = &qed_update_vport,
+ .q_rx_start = &qed_start_rxq,
+ .q_rx_stop = &qed_stop_rxq,
+ .q_tx_start = &qed_start_txq,
+ .q_tx_stop = &qed_stop_txq,
+ .filter_config = &qed_configure_filter,
+ .fastpath_stop = &qed_fastpath_stop,
+ .eth_cqe_completion = &qed_fp_cqe_completion,
+ .get_vport_stats = &qed_get_vport_stats,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+{
+ if (version != QED_ETH_INTERFACE_VERSION) {
+ pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
+ version, QED_ETH_INTERFACE_VERSION);
+ return NULL;
+ }
+
+ return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+ /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_main.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644
index 000000000..174f7341c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -0,0 +1,1149 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+
+static const char version[] =
+ "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION \
+ __stringify(FW_MAJOR_VERSION) "." \
+ __stringify(FW_MINOR_VERSION) "." \
+ __stringify(FW_REVISION_VERSION) "." \
+ __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME \
+ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+static int __init qed_init(void)
+{
+ pr_notice("qed_init called\n");
+
+ pr_info("%s", version);
+
+ return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+ pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+ */
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+ struct device *dev = &cdev->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ DP_NOTICE(cdev,
+ "Can't request 64-bit consistent allocations\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+ struct pci_dev *pdev = cdev->pdev;
+
+ if (cdev->doorbells)
+ iounmap(cdev->doorbells);
+ if (cdev->regview)
+ iounmap(cdev->regview);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* Performs PCI initializations as well as initializing PCI-related parameters
+ * in the device structure. Returns 0 in case of success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+ struct pci_dev *pdev)
+{
+ int rc;
+
+ cdev->pdev = pdev;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Cannot enable PCI device\n");
+ goto err0;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #0\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #2\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, "qed");
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to request PCI memory resources\n");
+ goto err1;
+ }
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ DP_NOTICE(cdev, "The bus is not PCI Express\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (cdev->pci_params.pm_cap == 0)
+ DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+ rc = qed_set_coherency_mask(cdev);
+ if (rc)
+ goto err2;
+
+ cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+ cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+ cdev->pci_params.irq = pdev->irq;
+
+ cdev->regview = pci_ioremap_bar(pdev, 0);
+ if (!cdev->regview) {
+ DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ pci_release_regions(pdev);
+err1:
+ pci_disable_device(pdev);
+err0:
+ return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info)
+{
+ struct qed_ptt *ptt;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ dev_info->num_hwfns = cdev->num_hwfns;
+ dev_info->pci_mem_start = cdev->pci_params.mem_start;
+ dev_info->pci_mem_end = cdev->pci_params.mem_end;
+ dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+ ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+
+ qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+
+ return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+ kfree(cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+ struct qed_dev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return cdev;
+
+ qed_init_struct(cdev);
+
+ return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+ pci_power_t state)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+ return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+ enum qed_protocol protocol,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct qed_dev *cdev;
+ int rc;
+
+ cdev = qed_alloc_cdev(pdev);
+ if (!cdev)
+ goto err0;
+
+ cdev->protocol = protocol;
+
+ qed_init_dp(cdev, dp_module, dp_level);
+
+ rc = qed_init_pci(cdev, pdev);
+ if (rc) {
+ DP_ERR(cdev, "init pci failed\n");
+ goto err1;
+ }
+ DP_INFO(cdev, "PCI init completed successfully\n");
+
+ rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+ if (rc) {
+ DP_ERR(cdev, "hw prepare failed\n");
+ goto err2;
+ }
+
+ DP_INFO(cdev, "qed_probe completed successffuly\n");
+
+ return cdev;
+
+err2:
+ qed_free_pci(cdev);
+err1:
+ qed_free_cdev(cdev);
+err0:
+ return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return;
+
+ qed_hw_remove(cdev);
+
+ qed_free_pci(cdev);
+
+ qed_set_power_state(cdev, PCI_D3hot);
+
+ qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ pci_disable_msix(cdev->pdev);
+ kfree(cdev->int_params.msix_table);
+ } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+ pci_disable_msi(cdev->pdev);
+ }
+
+ memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+ struct qed_int_params *int_params)
+{
+ int i, rc, cnt;
+
+ cnt = int_params->in.num_vectors;
+
+ for (i = 0; i < cnt; i++)
+ int_params->msix_table[i].entry = i;
+
+ rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+ int_params->in.min_msix_cnt, cnt);
+ if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+ (rc % cdev->num_hwfns)) {
+ pci_disable_msix(cdev->pdev);
+
+ /* If fastpath is initialized, we need at least one interrupt
+ * per hwfn [and the slow path interrupts]. New requested number
+ * should be a multiple of the number of hwfns.
+ */
+ cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+ DP_NOTICE(cdev,
+ "Trying to enable MSI-X with less vectors (%d out of %d)\n",
+ cnt, int_params->in.num_vectors);
+ rc = pci_enable_msix_exact(cdev->pdev,
+ int_params->msix_table, cnt);
+ if (!rc)
+ rc = cnt;
+ }
+
+ if (rc > 0) {
+ /* MSI-X configuration was achieved */
+ int_params->out.int_mode = QED_INT_MODE_MSIX;
+ int_params->out.num_vectors = rc;
+ rc = 0;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+ cnt, rc);
+ }
+
+ return rc;
+}
+
+/* This function outputs the int mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+ struct qed_int_params *int_params = &cdev->int_params;
+ struct msix_entry *tbl;
+ int rc = 0, cnt;
+
+ switch (int_params->in.int_mode) {
+ case QED_INT_MODE_MSIX:
+ /* Allocate MSIX table */
+ cnt = int_params->in.num_vectors;
+ int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+ if (!int_params->msix_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Enable MSIX */
+ rc = qed_enable_msix(cdev, int_params);
+ if (!rc)
+ goto out;
+
+ DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+ kfree(int_params->msix_table);
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_MSI:
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
+
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_INTA:
+ int_params->out.int_mode = QED_INT_MODE_INTA;
+ rc = 0;
+ goto out;
+ default:
+ DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+ int_params->in.int_mode);
+ rc = -EINVAL;
+ }
+
+out:
+ cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+ return rc;
+}
+
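+/* Fastpath vector indices are interleaved across the hwfns on CMT devices:
+ * index % num_hwfns selects the engine, and index / num_hwfns the slot in
+ * that engine's simd_proto_handler[] table.
+ */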
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+ int index, void(*handler)(void *))
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ hwfn->simd_proto_handler[relative_idx].func = handler;
+ hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ memset(&hwfn->simd_proto_handler[relative_idx], 0,
+ sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+ return IRQ_HANDLED;
+}
+
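+/* INTA/MSI handler. Within each hwfn's SISR status word, bit 0 signals the
+ * slowpath tasklet and every higher bit maps to one registered fastpath
+ * SIMD handler.
+ */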
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+ struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+ struct qed_hwfn *hwfn;
+ irqreturn_t rc = IRQ_NONE;
+ u64 status;
+ int i, j;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+ if (!status)
+ continue;
+
+ hwfn = &cdev->hwfns[i];
+
+ /* Slowpath interrupt */
+ if (unlikely(status & 0x1)) {
+ tasklet_schedule(hwfn->sp_dpc);
+ status &= ~0x1;
+ rc = IRQ_HANDLED;
+ }
+
+ /* Fastpath interrupts */
+ for (j = 0; j < 64; j++) {
+ if ((0x2ULL << j) & status) {
+ hwfn->simd_proto_handler[j].func(
+ hwfn->simd_proto_handler[j].token);
+ status &= ~(0x2ULL << j);
+ rc = IRQ_HANDLED;
+ }
+ }
+
+ if (unlikely(status))
+ DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+ "got an unknown interrupt status 0x%llx\n",
+ status);
+ }
+
+ return rc;
+}
+
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
+{
+ struct qed_dev *cdev = hwfn->cdev;
+ int rc = 0;
+ u8 id;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ id = hwfn->my_id;
+ snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+ id, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ rc = request_irq(cdev->int_params.msix_table[id].vector,
+ qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+ if (!rc)
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath MSI-X\n");
+ } else {
+ unsigned long flags = 0;
+
+ snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+ cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+ PCI_FUNC(cdev->pdev->devfn));
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+ flags |= IRQF_SHARED;
+
+ rc = request_irq(cdev->pdev->irq, qed_single_int,
+ flags, cdev->name, cdev);
+ }
+
+ return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+ int i;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i) {
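+ /* Slowpath IRQs are requested in hwfn order, so the first
+ * hwfn without one means none of the later hwfns have one
+ * either.
+ */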
+ if (!cdev->hwfns[i].b_int_requested)
+ break;
+ synchronize_irq(cdev->int_params.msix_table[i].vector);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ } else {
+ if (QED_LEADING_HWFN(cdev)->b_int_requested)
+ free_irq(cdev->pdev->irq, cdev);
+ }
+ qed_int_disable_post_isr_release(cdev);
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+ int i, rc;
+
+ rc = qed_hw_stop(cdev);
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ p_hwfn->b_sp_dpc_enabled = false;
+ DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+ "Disabled sp taskelt [hwfn %d] at %p\n",
+ i, p_hwfn->sp_dpc);
+ }
+ }
+
+ return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_hw_reset(cdev);
+ if (rc)
+ return rc;
+
+ qed_resc_free(cdev);
+
+ return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_resc_alloc(cdev);
+ if (rc)
+ return rc;
+
+ DP_INFO(cdev, "Allocated qed resources\n");
+
+ qed_resc_setup(cdev);
+
+ return rc;
+}
+
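+/* Returns the number of fastpath interrupts the protocol driver may use,
+ * capped by the fastpath MSI-X vectors when available, or otherwise by the
+ * 63 fastpath status bits each hwfn exposes through the single IRQ.
+ */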
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+ limit = cdev->num_hwfns * 63;
+ else if (cdev->int_params.fp_msix_cnt)
+ limit = cdev->int_params.fp_msix_cnt;
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(struct qed_int_info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ /* Need to expose only MSI-X information; Single IRQ is handled solely
+ * by qed.
+ */
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.fp_msix_base;
+
+ info->msix_cnt = cdev->int_params.fp_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+ }
+
+ return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+ enum qed_int_mode int_mode)
+{
+ int rc, i;
+ u8 num_vectors = 0;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+
+ cdev->int_params.in.int_mode = int_mode;
+ for_each_hwfn(cdev, i)
+ num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
+ cdev->int_params.in.num_vectors = num_vectors;
+
+ /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+ rc = qed_set_int_mode(cdev, false);
+ if (rc) {
+ DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ return rc;
+ }
+
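+ /* The first num_hwfns vectors serve the slowpath; whatever remains
+ * is handed to the protocol driver as fastpath vectors.
+ */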
+ cdev->int_params.fp_msix_base = cdev->num_hwfns;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+ cdev->num_hwfns;
+
+ return 0;
+}
+
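+/* Inflates a zlib-compressed firmware buffer into unzip_buf and returns the
+ * decompressed length in dwords, or 0 on any zlib failure.
+ */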
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+ zlib_inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+ p_hwfn->stream->msg, rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+ void *workspace;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+
+ workspace = vzalloc(zlib_inflate_workspacesize());
+ if (!workspace)
+ return -ENOMEM;
+ p_hwfn->stream->workspace = workspace;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ vfree(p_hwfn->stream->workspace);
+ kfree(p_hwfn->stream);
+ }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+ struct qed_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static int qed_slowpath_start(struct qed_dev *cdev,
+ struct qed_slowpath_params *params)
+{
+ struct qed_mcp_drv_version drv_version;
+ const u8 *data = NULL;
+ struct qed_hwfn *hwfn;
+ int rc;
+
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
+
+ rc = qed_nic_setup(cdev);
+ if (rc)
+ goto err;
+
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (rc)
+ goto err1;
+
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err2;
+ }
+
+ /* Start the slowpath */
+ data = cdev->firmware->data;
+
+ rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ true, data);
+ if (rc)
+ goto err3;
+
+ DP_INFO(cdev,
+ "HW initialization and function start completed successfully\n");
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
+
+ return 0;
+
+err3:
+ qed_free_stream_mem(cdev);
+ qed_slowpath_irq_free(cdev);
+err2:
+ qed_disable_msix(cdev);
+err1:
+ qed_resc_free(cdev);
+err:
+ release_firmware(cdev->firmware);
+
+ return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ qed_free_stream_mem(cdev);
+
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+
+ qed_disable_msix(cdev);
+ qed_nic_reset(cdev);
+
+ release_firmware(cdev->firmware);
+
+ return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+ char ver_str[VER_SIZE])
+{
+ int i;
+
+ memcpy(cdev->name, name, NAME_SIZE);
+ for_each_hwfn(cdev, i)
+ snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+ memcpy(cdev->ver_str, ver_str, VER_SIZE);
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id,
+ enum qed_sb_type type)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u8 n_hwfns;
+ u32 rc;
+
+ /* RoCE uses a single engine while L2 on CMT devices spreads across
+ * both; for any SB type other than an L2 queue we therefore force a
+ * single engine. Storage uses only engine 0 too.
+ */
+ if (type == QED_SB_TYPE_L2_QUEUE)
+ n_hwfns = cdev->num_hwfns;
+ else
+ n_hwfns = 1;
+
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u32 rc;
+
+ hwfn_index = sb_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+ return rc;
+}
+
+static int qed_set_link(struct qed_dev *cdev,
+ struct qed_link_params *params)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_mcp_link_params *link_params;
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (!cdev)
+ return -ENODEV;
+
+ /* The link should be set only once per PF */
+ hwfn = &cdev->hwfns[0];
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = qed_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+ link_params->speed.advertised_speeds = 0;
+ if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+ (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
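+ /* ethtool defines no 50G/100G advertise bits yet, which is
+ * presumably why the two checks below mask against 0 and are
+ * dead for now.
+ */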
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+ link_params->speed.forced_speed = params->forced_speed;
+
+ rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+ int port_type;
+
+ switch (media_type) {
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_KR:
+ port_type = PORT_FIBRE;
+ break;
+ case MEDIA_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case MEDIA_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case MEDIA_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case MEDIA_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_link_output *if_link)
+{
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_mcp_link_capabilities link_caps;
+ u32 media_type;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ /* TODO - at the moment assume supported and advertised speed equal */
+ if_link->supported_caps = SUPPORTED_FIBRE;
+ if (params.speed.autoneg)
+ if_link->supported_caps |= SUPPORTED_Autoneg;
+ if (params.pause.autoneg ||
+ (params.pause.forced_rx && params.pause.forced_tx))
+ if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= SUPPORTED_Pause;
+
+ if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
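+ /* ethtool has no capability bits for 50G/100G yet, hence the
+ * no-op |= 0 placeholders below (same presumption as in
+ * qed_set_link()).
+ */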
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= 0;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->advertised_caps |= 0;
+
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= 0;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->supported_caps |= 0;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ /* TODO - fill duplex properly */
+ if_link->duplex = DUPLEX_FULL;
+ qed_mcp_get_media_type(hwfn->cdev, &media_type);
+ if_link->port = qed_get_port_type(media_type);
+
+ if_link->autoneg = params.speed.autoneg;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ /* Link partner capabilities */
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= 0;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= 0;
+
+ if (link.an_complete)
+ if_link->lp_caps |= SUPPORTED_Autoneg;
+
+ if (link.partner_adv_pause)
+ if_link->lp_caps |= SUPPORTED_Pause;
+ if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+ link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+ if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+ struct qed_link_output *if_link)
+{
+ qed_fill_link(&cdev->hwfns[0], if_link);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, &if_link);
+
+ if (IS_LEAD_HWFN(hwfn) && cookie)
+ op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i, rc;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_drain(hwfn, ptt);
+ qed_ptt_release(hwfn, ptt);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ .probe = &qed_probe,
+ .remove = &qed_remove,
+ .set_power_state = &qed_set_power_state,
+ .set_id = &qed_set_id,
+ .update_pf_params = &qed_update_pf_params,
+ .slowpath_start = &qed_slowpath_start,
+ .slowpath_stop = &qed_slowpath_stop,
+ .set_fp_int = &qed_set_int_fp,
+ .get_fp_int = &qed_get_int_fp,
+ .sb_init = &qed_sb_init,
+ .sb_release = &qed_sb_release,
+ .simd_handler_config = &qed_simd_handler_config,
+ .simd_handler_clean = &qed_simd_handler_clean,
+ .set_link = &qed_set_link,
+ .get_link = &qed_get_current_link,
+ .drain = &qed_drain,
+ .update_msglvl = &qed_init_dp,
+ .chain_alloc = &qed_chain_alloc,
+ .chain_free = &qed_chain_free,
+};
+
+u32 qed_get_protocol_version(enum qed_protocol protocol)
+{
+ switch (protocol) {
+ case QED_PROTOCOL_ETH:
+ return QED_ETH_INTERFACE_VERSION;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644
index 000000000..20d048cdc
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field))
+
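+/* Packs the FW major/minor version into the PDA-compatibility field of the
+ * LOAD_REQ driver-id word.
+ */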
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
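+/* 2^17 bytes per megabit - converts the NVM flash-size encoding to bytes */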
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ u32 tmp, i;
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ /* The MB data is actually BE; Need to force it to cpu */
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ be32_to_cpu((__force __be32)tmp);
+ }
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ kfree(p_hwfn->mcp_info->mfw_mb_cur);
+ kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+ }
+ kfree(p_hwfn->mcp_info);
+
+ return 0;
+}
+
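+/* Reads the shmem public-section offsets (driver/MFW mailboxes, sequence
+ * numbers) so the mailbox macros can address them. A zero
+ * MISC_REG_SHARED_MEM_ADDR means the MCP never initialized shmem, in which
+ * case the offsets are left unset.
+ */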
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+ p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return 0;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+ DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since its public_base indicates
+ * that the MCP is not initialized
+ */
+ return 0;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+ p_info->mfw_mb_shadow =
+ kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
+ p_info->mfw_mb_length), GFP_ATOMIC);
+ if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ goto err;
+
+ /* Initialize the MFW mutex */
+ mutex_init(&p_info->mutex);
+
+ return 0;
+
+err:
+ DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+ qed_mcp_free(p_hwfn);
+ return -ENOMEM;
+}
+
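+/* Resets the MCP via a mailbox command; completion is detected by polling
+ * MISCS_REG_GENERIC_POR_0 for a change rather than by a mailbox response.
+ */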
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 org_mcp_reset_seq, cnt = 0;
+ int rc = 0;
+
+ /* Set drv command along with the updated sequence */
+ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+ (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ /* Give the FW up to 500 msec (50*1000*10 usec) */
+ } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < QED_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = -EAGAIN;
+ }
+
+ return rc;
+}
+
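+/* Issues a single mailbox command: bump the driver sequence, write the param
+ * and command, then poll the FW mailbox header until the echoed sequence
+ * matches ours or the retry budget runs out.
+ */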
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 seq, cnt = 1, actual_mb_seq;
+ int rc = 0;
+
+ /* Get actual driver mailbox sequence */
+ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Use MCP history register to check if MCP reset occurred between
+ * init time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+ /* Set drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+ /* Set drv command along with the updated sequence */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "wrote command (%x) to MFW MB param 0x%08x\n",
+ (cmd | seq), param);
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*1000*10 usec) */
+ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+ (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt * delay, *o_mcp_resp, seq);
+
+ /* Is this a reply to our command? */
+ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+ *o_mcp_resp &= FW_MSG_CODE_MASK;
+ /* Get the MCP param */
+ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+ } else {
+ /* FW BUG! */
+ DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ *o_mcp_resp = 0;
+ rc = -EAGAIN;
+ }
+ return rc;
+}
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ int rc = 0;
+
+ /* MCP not initialized */
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Lock Mutex to ensure only single thread is
+ * accessing the MCP at one time
+ */
+ mutex_lock(&p_hwfn->mcp_info->mutex);
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param);
+ /* Release Mutex */
+ mutex_unlock(&p_hwfn->mcp_info->mutex);
+
+ return rc;
+}
+
+static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 i;
+
+ /* Copy version string to MCP */
+ for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
+ *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 param;
+ int rc;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Save driver's version to shmem */
+ qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Load Request */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+ (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type),
+ p_load_code, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* If the MFW refused the load request we must abort. This can happen
+ * in the following cases:
+ * - The other port is in diagnostic mode.
+ * - A previously loaded function on the engine is not compliant with
+ *   the requester.
+ * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+ */
+ if (!(*p_load_code) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+ DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_reset)
+{
+ struct qed_mcp_link_state *p_link;
+ u32 status = 0;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ memset(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+ "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+ status,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link indications\n");
+ return;
+ }
+
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* Correct speed according to bandwidth allocation */
+ if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+ p_link->speed = p_link->speed *
+ p_hwfn->mcp_info->func_info.bandwidth_max /
+ 100;
+ qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_link->speed);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+ }
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up)
+{
+ struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ u32 param = 0, reply = 0, cmd;
+ struct pmm_phy_cfg phy_cfg;
+ int rc = 0;
+ u32 i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Set the shmem configuration according to params */
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* Write the requested configuration to shmem */
+ for (i = 0; i < sizeof(phy_cfg); i += 4)
+ qed_wr(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data) + i,
+ ((u32 *)&phy_cfg)[i >> 2]);
+
+ if (b_up) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link\n");
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Send the link configuration command to the MFW */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Reset the link status if needed */
+ if (!b_up)
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+ return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *info = p_hwfn->mcp_info;
+ int rc = 0;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ qed_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = -EINVAL;
+ }
+ }
+
+ /* ACK everything */
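+ /* The ack dwords sit right after the length dword and the current
+ * message area in the MFW mailbox, mirroring the read layout in
+ * qed_mcp_read_mb().
+ */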
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* The MFW expects the answer in BE, so force the write in that format */
+ qed_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32),
+ (__force u32)val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Received an MFW message indication but no new message!\n");
+ rc = -EINVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *p_mfw_ver)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+ u32 global_offsize;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ global_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global, mfw_ver));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *p_media_type)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, media_type));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
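+/* Copies this PF's public_func section out of shmem, clamped to the section
+ * size advertised by the MFW; returns the number of bytes copied.
+ */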
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+ struct public_func *p_info,
+ enum qed_pci_personality *p_proto)
+{
+ int rc = 0;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ *p_proto = QED_PCI_ETH;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return -EINVAL;
+ }
+
+ if (p_hwfn->cdev->mf_mode != SF) {
+ info->bandwidth_min = (shmem_info.config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ info->bandwidth_min);
+ info->bandwidth_min = 1;
+ }
+
+ info->bandwidth_max = (shmem_info.config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ info->bandwidth_max);
+ info->bandwidth_max = 100;
+ }
+ }
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+ }
+
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ info->wwn_port, info->wwn_node, info->ovlan);
+
+ return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 100,
+ &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ msleep(120);
+
+ return rc;
+}
+
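+/* The NVM config register encodes the flash size as a power-of-two count of
+ * megabits; shifting by MCP_BYTES_PER_MBIT_SHIFT turns that into bytes.
+ */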
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
+ flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return 0;
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver)
+{
+ int rc = 0;
+ u32 param = 0, reply = 0, i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
+ p_ver->version);
+ /* Copy version string to shmem */
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
+ DRV_MB_WR(p_hwfn, p_ptt,
+ union_data.drv_version.name[i * sizeof(u32)],
+ *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
+ &param);
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644
index 000000000..dbaae586b
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -0,0 +1,369 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+struct qed_mcp_link_params {
+ struct qed_mcp_link_speed_params speed;
+ struct qed_mcp_link_pause_params pause;
+ u32 loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+ u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+ bool link_up;
+
+ u32 speed; /* In Mb/s */
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+ u8 pause_on_host;
+
+ enum qed_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define QED_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+};
+
+struct qed_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct qed_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+ *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param cdev - qed dev pointer
+ * @param mfw_ver - mfw version value
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *mfw_ver);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param cdev - qed dev pointer
+ * @param media_type - media type value
+ *
+ * @return int -
+ * 0 - Operation was successul.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ * mailbox. It acquires a mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response. The response is polled every 10 usec for up
+ * to 5 seconds.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP.
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (exclude sequence).
+ * @param o_mcp_param- Optional parameter provided by the MCP
+ * response
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
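+
+/* Typical usage (sketch, mirroring qed_mcp_drain() in qed_mcp.c):
+ *
+ *	u32 resp = 0, param = 0;
+ *	int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
+ *			     &resp, &param);
+ */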
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver);
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+ struct mutex mutex; /* MCP access lock */
+ u32 public_base;
+ u32 drv_mb_addr;
+ u32 mfw_mb_addr;
+ u32 port_addr;
+ u16 drv_mb_seq;
+ u16 drv_pulse_seq;
+ struct qed_mcp_link_params link_input;
+ struct qed_mcp_link_state link_output;
+ struct qed_mcp_link_capabilities link_capabilities;
+ struct qed_mcp_function_info func_info;
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation
+ * was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case operation
+ * succeed, returns whether this PF is the first on the
+ * chip/engine/port or function. This function should be
+ * called when driver is ready to accept MFW events after
+ * Storms initializations are done.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_load_code - The MCP response param containing one
+ * of the following:
+ * FW_MSG_CODE_DRV_LOAD_ENGINE
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - called during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644
index 000000000..e8df12335
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -0,0 +1,370 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xff << 24)
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_FCOE \
+ 0x1f0408UL
+#define PRS_REG_SEARCH_ROCE \
+ 0x1f040cUL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#define PGLUE_B_REG_PF_BAR0_SIZE \
+ 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE \
+ 0x2aae64UL
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_sp.h b/kernel/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644
index 000000000..287fadfab
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -0,0 +1,364 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+ QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ QED_SPQ_MODE_CB, /* Client supplies a callback */
+ QED_SPQ_MODE_EBLOCK, /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+ void (*function)(struct qed_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
+};
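+
+/* Editor's sketch of a callback matching the signature above; all names
+ * besides the qed types are hypothetical:
+ *
+ * static void my_ramrod_done(struct qed_hwfn *p_hwfn, void *cookie,
+ *                            union event_ring_data *data,
+ *                            u8 fw_return_code)
+ * {
+ *         struct my_ctx *ctx = cookie; // cookie is passed back untouched
+ *
+ *         ctx->fw_rc = fw_return_code;
+ *         complete(&ctx->done);
+ * }
+ *
+ * struct qed_spq_comp_cb cb = { .function = my_ramrod_done,
+ *                               .cookie = &ctx };
+ */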
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ * @file
+ *
+ * QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct vport_update_ramrod_data vport_update;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ QED_SPQ_PRIORITY_NORMAL,
+ QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+ struct qed_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+struct qed_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct qed_spq_entry {
+ struct list_head list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ struct list_head *queue;
+
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb comp_cb;
+ struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+ struct qed_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct qed_consq {
+ struct qed_chain chain;
+};
+
+struct qed_spq {
+ spinlock_t lock; /* SPQ lock */
+
+ struct list_head unlimited_pending;
+ struct list_head pending;
+ struct list_head completion_pending;
+ struct list_head free_pool;
+
+ struct qed_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct qed_spq_entry *p_virt;
+
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+ /* Bitmap for handling out-of-order completions */
+ DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+ u8 comp_bitmap_idx;
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+};
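+
+/* Editor's note: SPQ_RING_SIZE above is derived rather than hard-coded.
+ * Assuming CORE_SPQE_PAGE_SIZE_BYTES is 4096 and a slow_path_element is
+ * 64 bytes (neither value appears in this patch), the ring holds
+ * 4096 / 64 = 64 elements, which also sizes p_comp_bitmap.
+ */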
+
+/**
+ * @brief qed_spq_post - Posts a Slow hwfn request to FW, or if there is
+ * no room, queues it on the pending list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ * free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param fw_return_code - FW return code from the EQE
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes a ConsQ
+ * struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_consq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ * state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION 0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_request_params {
+ size_t ramrod_data_size;
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb *p_comp_data;
+};
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params);
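+
+/* Editor's sketch of the ramrod flow built on qed_sp_init_request(),
+ * mirroring qed_sp_pf_start() in qed_sp_commands.c; CMD and PROTO stand
+ * in for a real command/protocol pair:
+ *
+ * struct qed_sp_init_request_params params;
+ * struct qed_spq_entry *p_ent;
+ * int rc;
+ *
+ * memset(&params, 0, sizeof(params));
+ * params.ramrod_data_size = sizeof(struct vport_start_ramrod_data);
+ * params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ *
+ * rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+ *                          opaque_fid, CMD, PROTO, &params);
+ * if (rc)
+ *         return rc;
+ * // fill p_ent->ramrod.vport_start here, then post:
+ * return qed_spq_post(p_hwfn, p_ent, NULL);
+ */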
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param mode
+ *
+ * @return int
+ */
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PF's Event Ring. This ramrod also
+ * deletes the context for the Slow hwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644
index 000000000..6f7879136
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -0,0 +1,170 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params)
+{
+ int rc = -EINVAL;
+ struct qed_spq_entry *p_ent = NULL;
+ u32 opaque_cid = opaque_fid << 16 | cid;
+
+ if (!pp_ent)
+ return -ENOMEM;
+
+ rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+ if (rc != 0)
+ return rc;
+
+ p_ent = *pp_ent;
+
+ p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+
+ p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_params->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case QED_SPQ_MODE_BLOCK:
+ if (!p_params->p_comp_data)
+ return -EINVAL;
+
+ p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+ break;
+
+ case QED_SPQ_MODE_CB:
+ if (!p_params->p_comp_data)
+ p_ent->comp_cb.function = NULL;
+ else
+ p_ent->comp_cb = *p_params->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+ if (p_params->ramrod_data_size)
+ memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+ return 0;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode)
+{
+ struct qed_sp_init_request_params params;
+ struct pf_start_ramrod_data *p_ramrod = NULL;
+ u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ /* update initial eq producer */
+ qed_eq_prod_update(p_hwfn,
+ qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn,
+ &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.pf_start;
+
+ p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = QED_PATH_ID(p_hwfn);
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = cpu_to_le16(0xf);
+ p_ramrod->mf_mode = mode;
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ /* Place EQ address in RAMROD */
+ p_ramrod->event_ring_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
+
+ p_ramrod->consolid_q_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_ramrod->consolid_q_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+ p_hwfn->hw_info.personality = PERSONALITY_ETH;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+ sb, sb_index,
+ (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
+ p_ramrod->outer_tag);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sp_init_request_params params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&params, 0, sizeof(params));
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644
index 000000000..3dd548ab8
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -0,0 +1,893 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+#define SPQ_BLOCK_SLEEP_LENGTH (1000)
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_spq_comp_done *comp_done;
+
+ comp_done = (struct qed_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret)
+{
+ int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ struct qed_spq_comp_done *comp_done;
+ int rc;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != 0)
+ DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+ /* Retry after drain */
+ sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+
+ DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+ return -EBUSY;
+}
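+
+/* Editor's note: with SPQ_BLOCK_SLEEP_LENGTH at 1000 iterations and a
+ * 5-10ms usleep_range() per iteration, each polling pass above waits at
+ * most roughly 5-10 seconds, once before and once after the MCP drain.
+ */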
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ case QED_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = qed_spq_blocking_cb;
+ break;
+ case QED_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid,
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi,
+ p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq)
+{
+ u16 pq;
+ struct qed_cxt_info cxt_info;
+ struct core_conn_context *p_cxt;
+ union qed_qm_pq_params pq_params;
+ int rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+ /* QM physical queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ p_cxt->xstorm_st_context.consolid_base_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.consolid_base_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq,
+ struct qed_spq_entry *p_ent)
+{
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ u16 echo = qed_chain_get_prod_idx(p_chain);
+ struct slow_path_element *elem;
+ struct core_db_data db;
+
+ p_ent->elem.hdr.echo = cpu_to_le16(echo);
+ elem = qed_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+ return -EINVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ memset(&db, 0, sizeof(db));
+ SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* validate producer is up to date */
+ rmb();
+
+ db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+ /* do not reorder */
+ barrier();
+
+ DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+ /* make sure the doorbell was rung */
+ mmiowb();
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+ qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+ p_spq->cid, db.params, db.agg_flags,
+ qed_chain_get_prod_idx(p_chain));
+
+ return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie)
+{
+ struct qed_eq *p_eq = cookie;
+ struct qed_chain *p_chain = &p_eq->chain;
+ int rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+ /* Need to guarantee the fw_cons index we use points to a usable
+ * element (to comply with our chain), so our macros would comply
+ */
+ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+ qed_chain_get_usable_per_page(p_chain))
+ fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+ if (!p_eqe) {
+ rc = -EINVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode,
+ p_eqe->protocol_id,
+ p_eqe->reserved0,
+ le16_to_cpu(p_eqe->echo),
+ p_eqe->fw_return_code,
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (qed_async_event_completion(p_hwfn, p_eqe))
+ rc = -EINVAL;
+ } else if (qed_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = -EINVAL;
+ }
+
+ qed_chain_recycle_consumed(p_chain);
+ }
+
+ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem)
+{
+ struct qed_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ num_elem,
+ sizeof(union event_ring_element),
+ &p_eq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ qed_int_register_cb(p_hwfn,
+ qed_eq_completion,
+ p_eq,
+ &p_eq->eq_sb_index,
+ &p_eq->p_fw_cons);
+
+ return p_eq;
+
+eq_allocate_fail:
+ qed_eq_free(p_hwfn, p_eq);
+ return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ if (!p_eq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+ kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+ struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
+{
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ int rc;
+
+ rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+
+ return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ unsigned int i = 0;
+
+ INIT_LIST_HEAD(&p_spq->pending);
+ INIT_LIST_HEAD(&p_spq->completion_pending);
+ INIT_LIST_HEAD(&p_spq->free_pool);
+ INIT_LIST_HEAD(&p_spq->unlimited_pending);
+ spin_lock_init(&p_spq->lock);
+
+ /* SPQ empty pool */
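+ /* Each entry's elem.data_ptr is set to the DMA address of its own
+ * embedded 'ramrod' member: offsetof() locates that member within
+ * the first entry, and the loop strides by sizeof(struct qed_spq_entry).
+ */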
+ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ for (i = 0; i < p_spq->chain.capacity; i++) {
+ p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+ p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+ list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct qed_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+
+ bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+ p_spq->comp_bitmap_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ qed_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ struct qed_spq_entry *p_virt = NULL;
+
+ /* SPQ struct */
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ return -ENOMEM;
+ }
+
+ /* SPQ ring */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_SINGLE,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ &p_phys,
+ GFP_KERNEL);
+
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+ p_hwfn->p_spq = p_spq;
+
+ return 0;
+
+spq_allocate_fail:
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+ return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (!p_spq)
+ return;
+
+ if (p_spq->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ p_spq->p_virt,
+ p_spq->p_phys);
+
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+
+ spin_lock_bh(&p_spq->lock);
+
+ if (list_empty(&p_spq->free_pool)) {
+ p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+ if (!p_ent) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_ent->list);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ spin_unlock_bh(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - adds a new entry to the pending
+ * list. Should be called while the SPQ lock is held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+
+ if (list_empty(&p_spq->free_pool)) {
+ list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return 0;
+ } else {
+ struct qed_spq_entry *p_en2;
+
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_en2->list);
+
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to override the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+ *p_en2 = *p_ent;
+
+ kfree(p_ent);
+
+ p_ent = p_en2;
+ }
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case QED_SPQ_PRIORITY_NORMAL:
+ list_add_tail(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case QED_SPQ_PRIORITY_HIGH:
+ list_add(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+ struct list_head *head,
+ u32 keep_reserve)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ int rc;
+
+ while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !list_empty(head)) {
+ struct qed_spq_entry *p_ent =
+ list_first_entry(head, struct qed_spq_entry, list);
+ list_del(&p_ent->list);
+ list_add_tail(&p_ent->list, &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ list_del(&p_ent->list);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+
+ while (!list_empty(&p_spq->free_pool)) {
+ if (list_empty(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = list_first_entry(&p_spq->unlimited_pending,
+ struct qed_spq_entry,
+ list);
+ if (!p_ent)
+ return -EINVAL;
+
+ list_del(&p_ent->list);
+
+ qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return qed_spq_post_list(p_hwfn, &p_spq->pending,
+ SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ int rc = 0;
+ struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ /* Complete the entry */
+ rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+ spin_lock_bh(&p_spq->lock);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = qed_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; No need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ spin_unlock_bh(&p_spq->lock);
+
+ if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ /* For entries in QED BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ spin_lock_bh(&p_spq->lock);
+ list_del(&p_ent->list);
+ qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct qed_spq *p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_spq_entry *tmp;
+ struct qed_spq_entry *found = NULL;
+ int rc;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return -EINVAL;
+
+ spin_lock_bh(&p_spq->lock);
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+ list) {
+ if (p_ent->elem.hdr.echo == echo) {
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+
+ list_del(&p_ent->list);
+
+ /* Avoid overwriting of SPQ entries when getting
+ * out-of-order completions, by marking the completions
+ * in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ * Set and clear a single bit per entry; a count of
+ * SPQ_RING_SIZE would run past the end of the bitmap.
+ */
+ __set_bit(pos, p_spq->p_comp_bitmap);
+
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
+ p_spq->comp_bitmap_idx++;
+ p_spq->comp_bitmap_idx %= SPQ_RING_SIZE;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+
+ /* This is relatively uncommon - depends on scenarios
+ * which have multiple per-PF sent ramrods.
+ */
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+ le16_to_cpu(echo),
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ spin_unlock_bh(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an entry this EQE completes\n");
+ return -EEXIST;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+ /* EBLOCK is responsible for freeing its own entry */
+ qed_spq_return_entry(p_hwfn, found);
+
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize ConsQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_PAGE_SIZE / 0x80,
+ 0x80,
+ &p_consq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+ goto consq_allocate_fail;
+ }
+
+ return p_consq;
+
+consq_allocate_fail:
+ qed_consq_free(p_hwfn, p_consq);
+ return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ if (!p_consq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+ kfree(p_consq);
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qede/Makefile b/kernel/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644
index 000000000..06ff90d87
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qede/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
diff --git a/kernel/drivers/net/ethernet/qlogic/qede/qede.h b/kernel/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644
index 000000000..ea00d5f3b
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qede/qede.h
@@ -0,0 +1,285 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION 8
+#define QEDE_MINOR_VERSION 4
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
+ __stringify(QEDE_MINOR_VERSION) "." \
+ __stringify(QEDE_REVISION_VERSION) "." \
+ __stringify(QEDE_ENGINEERING_VERSION)
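+/* With the values above, DRV_MODULE_VERSION expands to "8.4.0.0". */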
+
+#define QEDE_ETH_INTERFACE_VERSION 300
+
+#define DRV_MODULE_SYM qede
+
+struct qede_stats {
+ u64 no_buff_discards;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 coalesced_pkts;
+ u64 coalesced_events;
+ u64 coalesced_aborts_num;
+ u64 non_coalesced_pkts;
+ u64 coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_127_byte_packets;
+ u64 rx_255_byte_packets;
+ u64 rx_511_byte_packets;
+ u64 rx_1023_byte_packets;
+ u64 rx_1518_byte_packets;
+ u64 rx_1522_byte_packets;
+ u64 rx_2047_byte_packets;
+ u64 rx_4095_byte_packets;
+ u64 rx_9216_byte_packets;
+ u64 rx_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_dev {
+ struct qed_dev *cdev;
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ u32 dp_module;
+ u8 dp_level;
+
+ const struct qed_eth_ops *ops;
+
+ struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+ struct qede_fastpath *fp_array;
+ u16 num_rss;
+ u8 num_tc;
+#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
+ (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx) \
+ (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (edev), (txqidx))])
+
+ struct qed_int_info int_info;
+ unsigned char primary_mac[ETH_ALEN];
+
+ /* Smaller private variant of the RTNL lock */
+ struct mutex qede_lock;
+ u32 state; /* Protected by qede_lock */
+ u16 rx_buf_size;
+ /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+ /* Max supported alignment is 256 (shift of 8);
+ * a minimal alignment shift of 6 is optimal for 57xxx HW performance
+ */
+#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
+ /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define QEDE_FW_RX_ALIGN_END \
+ max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+ struct qede_stats stats;
+ struct qed_update_vport_rss_params rss_params;
+ u16 q_num_rx_buffers; /* Must be a power of two */
+ u16 q_num_tx_buffers; /* Must be a power of two */
+
+ struct delayed_work sp_task;
+ unsigned long sp_flags;
+};
+
+enum QEDE_STATE {
+ QEDE_STATE_CLOSED,
+ QEDE_STATE_OPEN,
+};
+
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+#define MAX_NUM_TC 8
+#define MAX_NUM_PRI 8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skbs are built only after the frame has been DMA-ed.
+ */
+struct sw_rx_data {
+ u8 *data;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct qede_rx_queue {
+ __le16 *hw_cons_ptr;
+ struct sw_rx_data *sw_rx_ring;
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring;
+ void __iomem *hw_rxq_prod_addr;
+
+ int rx_buf_size;
+
+ u16 num_rx_buffers;
+ u16 rxq_id;
+
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ u32 raw;
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD BIT(0)
+};
+
+struct qede_tx_queue {
+ int index; /* Queue index */
+ __le16 *hw_cons_ptr;
+ struct sw_tx_bd *sw_tx_ring;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ struct qed_chain tx_pbl;
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+
+ u16 num_tx_buffers;
+};
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
+ le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
+ (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
+ (bd)->nbytes = cpu_to_le16(len); \
+ } while (0)
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
+
+struct qede_fastpath {
+ struct qede_dev *edev;
+ u8 rss_id;
+ struct napi_struct napi;
+ struct qed_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txqs;
+
+#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[VEC_NAME_SIZE];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN 0
+#define XMIT_L4_CSUM BIT(0)
+#define XMIT_LSO BIT(1)
+#define XMIT_ENC BIT(2)
+
+#define QEDE_CSUM_ERROR BIT(0)
+#define QEDE_CSUM_UNNECESSARY BIT(1)
+
+#define QEDE_SP_RX_MODE 1
+
+union qede_reload_args {
+ u16 mtu;
+};
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *edev,
+ union qede_reload_args *args),
+ union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+
+#define RX_RING_SIZE_POW 13
+#define RX_RING_SIZE BIT(RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+
+#define TX_RING_SIZE_POW 13
+#define TX_RING_SIZE BIT(TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+
+#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
diff --git a/kernel/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/kernel/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644
index 000000000..3a362476a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -0,0 +1,385 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+ {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rx_hw_errors),
+ QEDE_RQSTAT(rx_alloc_errors),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+ qede_rqstats_arr[(sindex)].offset)))
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+ bool pf_only;
+} qede_stats_arr[] = {
+ QEDE_STAT(rx_ucast_bytes),
+ QEDE_STAT(rx_mcast_bytes),
+ QEDE_STAT(rx_bcast_bytes),
+ QEDE_STAT(rx_ucast_pkts),
+ QEDE_STAT(rx_mcast_pkts),
+ QEDE_STAT(rx_bcast_pkts),
+
+ QEDE_STAT(tx_ucast_bytes),
+ QEDE_STAT(tx_mcast_bytes),
+ QEDE_STAT(tx_bcast_bytes),
+ QEDE_STAT(tx_ucast_pkts),
+ QEDE_STAT(tx_mcast_pkts),
+ QEDE_STAT(tx_bcast_pkts),
+
+ QEDE_PF_STAT(rx_64_byte_packets),
+ QEDE_PF_STAT(rx_127_byte_packets),
+ QEDE_PF_STAT(rx_255_byte_packets),
+ QEDE_PF_STAT(rx_511_byte_packets),
+ QEDE_PF_STAT(rx_1023_byte_packets),
+ QEDE_PF_STAT(rx_1518_byte_packets),
+ QEDE_PF_STAT(rx_1522_byte_packets),
+ QEDE_PF_STAT(rx_2047_byte_packets),
+ QEDE_PF_STAT(rx_4095_byte_packets),
+ QEDE_PF_STAT(rx_9216_byte_packets),
+ QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(tx_64_byte_packets),
+ QEDE_PF_STAT(tx_65_to_127_byte_packets),
+ QEDE_PF_STAT(tx_128_to_255_byte_packets),
+ QEDE_PF_STAT(tx_256_to_511_byte_packets),
+ QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+ QEDE_PF_STAT(rx_mac_crtl_frames),
+ QEDE_PF_STAT(tx_mac_ctrl_frames),
+ QEDE_PF_STAT(rx_pause_frames),
+ QEDE_PF_STAT(tx_pause_frames),
+ QEDE_PF_STAT(rx_pfc_frames),
+ QEDE_PF_STAT(tx_pfc_frames),
+
+ QEDE_PF_STAT(rx_crc_errors),
+ QEDE_PF_STAT(rx_align_errors),
+ QEDE_PF_STAT(rx_carrier_errors),
+ QEDE_PF_STAT(rx_oversize_packets),
+ QEDE_PF_STAT(rx_jabbers),
+ QEDE_PF_STAT(rx_undersize_packets),
+ QEDE_PF_STAT(rx_fragments),
+ QEDE_PF_STAT(tx_lpi_entry_count),
+ QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_STAT(brb_truncates),
+ QEDE_PF_STAT(brb_discards),
+ QEDE_STAT(no_buff_discards),
+ QEDE_PF_STAT(mftag_filter_discards),
+ QEDE_PF_STAT(mac_filter_discards),
+ QEDE_STAT(tx_err_drop_pkts),
+
+ QEDE_STAT(coalesced_pkts),
+ QEDE_STAT(coalesced_events),
+ QEDE_STAT(coalesced_aborts_num),
+ QEDE_STAT(non_coalesced_pkts),
+ QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+ (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+ + qede_stats_arr[(index)].offset)))
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+ int i, j, k;
+
+ for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_stats_arr[i].string);
+ j++;
+ }
+
+ for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qede_get_strings_stats(edev, buf);
+ break;
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int sidx, cnt = 0;
+ int qid;
+
+ qede_fill_by_demand_stats(edev);
+
+ mutex_lock(&edev->qede_lock);
+
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+ buf[cnt] = 0;
+ for (qid = 0; qid < edev->num_rss; qid++)
+ buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+ cnt++;
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int num_stats = QEDE_NUM_STATS;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return num_stats + QEDE_NUM_RQSTATS;
+
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ return -EINVAL;
+ }
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ cmd->supported = current_link.supported_caps;
+ cmd->advertising = current_link.advertised_caps;
+ if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+ ethtool_cmd_speed_set(cmd, current_link.speed);
+ cmd->duplex = current_link.duplex;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ }
+ cmd->port = current_link.port;
+ cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+ cmd->lp_advertising = current_link.lp_caps;
+
+ return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+ u32 speed;
+
+ if (edev->dev_info.common.is_mf) {
+ DP_INFO(edev,
+ "Link parameters can not be changed in MF mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&current_link, 0, sizeof(current_link));
+ memset(&params, 0, sizeof(params));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ speed = ethtool_cmd_speed(cmd);
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ params.autoneg = true;
+ params.forced_speed = 0;
+ params.adv_speeds = cmd->advertising;
+ } else { /* forced speed */
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+ params.autoneg = false;
+ params.forced_speed = speed;
+ switch (speed) {
+ case SPEED_10000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_10000baseKR_Full)) {
+ DP_INFO(edev, "10G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ break;
+ case SPEED_40000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_40000baseLR4_Full)) {
+ DP_INFO(edev, "40G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ break;
+ default:
+ DP_INFO(edev, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+ }
+
+ params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ strlcpy(info->driver, "qede", sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+ snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ edev->dev_info.common.fw_major,
+ edev->dev_info.common.fw_minor,
+ edev->dev_info.common.fw_rev,
+ edev->dev_info.common.fw_eng);
+
+ snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+ edev->dev_info.common.mfw_rev & 0xFF);
+
+ if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
+ sizeof(info->fw_version)) {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "mfw %s storm %s", mfw, storm);
+ } else {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s %s", mfw, storm);
+ }
+
+ strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
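+/* The ethtool msglevel is a packed u32: the qed debug level sits in the bits
+ * above QED_LOG_LEVEL_SHIFT and the per-module verbosity bitmap sits in the
+ * bits below it (see the encoding documented above qede_config_debug() in
+ * qede_main.c).
+ */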
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+ edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(level, &dp_module, &dp_level);
+
+ edev->dp_level = dp_level;
+ edev->dp_module = dp_module;
+ edev->ops->common->update_msglvl(edev->cdev,
+ dp_module, dp_level);
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ return current_link.link_up;
+}
+
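+/* Callback handed to qede_reload(); presumably invoked while the interface is
+ * quiesced, so the new MTU takes effect when the rings are re-allocated.
+ */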
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+ edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define ETH_MIN_PACKET_SIZE 60
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ union qede_reload_args args;
+
+ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ DP_ERR(edev, "Can't support requested MTU size\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Configuring MTU size of %d\n", new_mtu);
+
+ /* Set the MTU field and restart the interface if needed */
+ args.mtu = new_mtu;
+
+ if (netif_running(edev->ndev))
+ qede_reload(edev, &qede_update_mtu, &args);
+
+ qede_update_mtu(edev, &args);
+
+ return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .set_settings = qede_set_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_sset_count = qede_get_sset_count,
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c b/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644
index 000000000..f4657a2e7
--- /dev/null
+++ b/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,2584 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/vxlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
+ DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_10 0x1635
+#define CHIP_NUM_57980S_MF 0x1636
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_57980S_25 0x1656
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#endif
+
+static const struct pci_device_id qede_pci_tbl[] = {
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT (5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+static struct pci_driver qede_pci_driver = {
+ .name = "qede",
+ .id_table = qede_pci_tbl,
+ .probe = qede_probe,
+ .remove = qede_remove,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+ {
+ .link_update = qede_link_update,
+ },
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_drvinfo drvinfo;
+ struct qede_dev *edev;
+
+ /* Currently only support name change */
+ if (event != NETDEV_CHANGENAME)
+ goto done;
+
+ /* Check whether this is a qede device */
+ if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+ goto done;
+
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+ if (strcmp(drvinfo.driver, "qede"))
+ goto done;
+ edev = netdev_priv(ndev);
+
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+ "qede");
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+ .notifier_call = qede_netdev_event,
+};
+
+static int __init qede_init(void)
+{
+ int ret;
+ u32 qed_ver;
+
+ pr_notice("qede_init: %s\n", version);
+
+ qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+ if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+ pr_notice("Version mismatch [%08x != %08x]\n",
+ qed_ver,
+ QEDE_ETH_INTERFACE_VERSION);
+ return -EINVAL;
+ }
+
+ qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ if (!qed_ops) {
+ pr_notice("Failed to get qed ethtool operations\n");
+ return -EINVAL;
+ }
+
+ /* Must register notifier before pci ops, since we might miss
+ * interface rename after pci probe and netdev registration.
+ */
+ ret = register_netdevice_notifier(&qede_netdev_notifier);
+ if (ret) {
+ pr_notice("Failed to register netdevice_notifier\n");
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ ret = pci_register_driver(&qede_pci_driver);
+ if (ret) {
+ pr_notice("Failed to register driver\n");
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+ pr_notice("qede_cleanup called\n");
+
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ pci_unregister_driver(&qede_pci_driver);
+ qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
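+/* A completed packet occupies first_bd->data.nbds BDs in the Tx PBL chain:
+ * the 1st BD (plus a TSO header split BD when QEDE_TSO_SPLIT_BD is set),
+ * optional offload BDs, and one BD per skb frag. The routine below consumes
+ * and unmaps them in that order before freeing the skb.
+ */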
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ int *len)
+{
+ u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_bd *tx_data_bd;
+ int bds_consumed = 0;
+ int nbds;
+ bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+ int i, split_bd_len = 0;
+
+ if (unlikely(!skb)) {
+ DP_ERR(edev,
+ "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+ idx, txq->sw_tx_cons, txq->sw_tx_prod);
+ return -1;
+ }
+
+ *len = skb->len;
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ bds_consumed++;
+
+ nbds = first_bd->data.nbds;
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ bds_consumed++;
+ }
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ while (bds_consumed++ < nbds)
+ qed_chain_consume(&txq->tx_pbl);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+
+ return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *first_bd,
+ int nbd,
+ bool data_split)
+{
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_bd *tx_data_bd;
+ int i, split_bd_len = 0;
+
+ /* Return prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ nbd--;
+ }
+
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < nbd; i++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ if (tx_data_bd->nbytes)
+ dma_unmap_page(&edev->pdev->dev,
+ BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ /* Return prod again to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+}
+
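+/* Classify the skb for the transmit path: XMIT_PLAIN when no checksum offload
+ * is requested, XMIT_L4_CSUM otherwise, with XMIT_LSO OR'ed in for GSO skbs;
+ * *ipv6_ext is raised for IPv6 packets whose next header indicates extension
+ * headers, which need the extra 2nd/3rd BDs.
+ */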
+static u32 qede_xmit_type(struct qede_dev *edev,
+ struct sk_buff *skb,
+ int *ipv6_ext)
+{
+ u32 rc = XMIT_L4_CSUM;
+ __be16 l3_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ l3_proto = vlan_get_protocol(skb);
+ if (l3_proto == htons(ETH_P_IPV6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *ipv6_ext = 1;
+
+ if (skb_is_gso(skb))
+ rc |= XMIT_LSO;
+
+ return rc;
+}
+
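+/* For IPv6 packets with extension headers the FW cannot parse the headers
+ * itself, so the driver supplies the parsing results in the 2nd/3rd BDs:
+ * the L4 header start offset in 16-bit words, the pseudo-checksum mode, a
+ * UDP indication, and (when a 3rd BD exists) the TCP header length in dwords.
+ */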
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+ struct eth_tx_2nd_bd *second_bd,
+ struct eth_tx_3rd_bd *third_bd)
+{
+ u8 l4_proto;
+ u16 bd2_bits = 0, bd2_bits2 = 0;
+
+ bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+ bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+ bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ else
+ l4_proto = ip_hdr(skb)->protocol;
+
+ if (l4_proto == IPPROTO_UDP)
+ bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+ if (third_bd) {
+ third_bd->data.bitfields |=
+ ((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
+ }
+
+ second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_dev *edev,
+ skb_frag_t *frag,
+ struct eth_tx_bd *bd)
+{
+ dma_addr_t mapping;
+
+ /* Map skb non-linear frag data for DMA */
+ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the data pointer of the frag data */
+ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+ return 0;
+}
+
+/* Main transmit function */
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct netdev_queue *netdev_txq;
+ struct qede_tx_queue *txq;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_2nd_bd *second_bd = NULL;
+ struct eth_tx_3rd_bd *third_bd = NULL;
+ struct eth_tx_bd *tx_data_bd = NULL;
+ u16 txq_index;
+ u8 nbd = 0;
+ dma_addr_t mapping;
+ int rc, frag_idx = 0, ipv6_ext = 0;
+ u8 xmit_type;
+ u16 idx;
+ u16 hlen;
+ bool data_split = false;
+
+ /* Get tx-queue context and netdev index */
+ txq_index = skb_get_queue_mapping(skb);
+ WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ txq = QEDE_TX_QUEUE(edev, txq_index);
+ netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+ /* Current code doesn't support SKB linearization, and doesn't need
+ * it: the max number of skb frags fits within what the FW HSI can
+ * accept per packet (enforced by the BUILD_BUG_ON below).
+ */
+ BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
+
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+ (MAX_SKB_FRAGS + 1));
+
+ xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = (struct eth_tx_1st_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ return NETDEV_TX_OK;
+ }
+ nbd++;
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* In case there is IPv6 with extension headers or LSO we need 2nd and
+ * 3rd BDs.
+ */
+ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+ second_bd = (struct eth_tx_2nd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(second_bd, 0, sizeof(*second_bd));
+
+ nbd++;
+ third_bd = (struct eth_tx_3rd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(third_bd, 0, sizeof(*third_bd));
+
+ nbd++;
+ /* We need to fill in additional data in second_bd... */
+ tx_data_bd = (struct eth_tx_bd *)second_bd;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Fill the parsing flags & params according to the requested offload */
+ if (xmit_type & XMIT_L4_CSUM) {
+ /* We don't re-calculate IP checksum as it is already done by
+ * the upper stack
+ */
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ /* If the packet is IPv6 with extension header, indicate that
+ * to FW and pass a few params, since the device cracker doesn't
+ * support parsing IPv6 with extension header(s).
+ */
+ if (unlikely(ipv6_ext))
+ qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+ }
+
+ if (xmit_type & XMIT_LSO) {
+ first_bd->data.bd_flags.bitfields |=
+ (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+ third_bd->data.lso_mss =
+ cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data;
+
+ /* @@@TBD - if this will not be removed, need to check */
+ third_bd->data.bitfields |=
+ (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+ /* Make life easier for FW guys who can't deal with header and
+ * data on same BD. If we need to split, use the second bd...
+ */
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "TSO split header size is %d (%x:%x)\n",
+ first_bd->nbytes, first_bd->addr.hi,
+ first_bd->addr.lo);
+
+ mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+ le32_to_cpu(first_bd->addr.lo)) +
+ hlen;
+
+ BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+ le16_to_cpu(first_bd->nbytes) -
+ hlen);
+
+ /* this marks the BD as one that has no
+ * individual mapping
+ */
+ txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+ first_bd->nbytes = cpu_to_le16(hlen);
+
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ data_split = true;
+ }
+ }
+
+ /* Handle fragmented skb */
+ /* special handle for frags inside 2nd and 3rd bds.. */
+ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+
+ if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ else
+ tx_data_bd = NULL;
+
+ frag_idx++;
+ }
+
+ /* map last frags into 4th, 5th .... */
+ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+
+ memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = nbd;
+
+ netdev_tx_sent_queue(netdev_txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ /* Advance the packet producer only now that all mappings have
+ * succeeded, since mapping of pages may fail and require unwinding.
+ */
+ txq->sw_tx_prod++;
+
+ /* 'next page' entries are counted in the producer value */
+ txq->tx_db.data.bd_prod =
+ cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+ < (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Stop queue was called\n");
+ /* paired memory barrier is in qede_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons
+ */
+ smp_mb();
+
+ if (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1) &&
+ (edev->state == QEDE_STATE_OPEN)) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Wake queue was called\n");
+ }
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+ u16 hw_bd_cons;
+
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+ return 0;
+
+ return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static int qede_tx_int(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+ u16 hw_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ int rc;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ int len = 0;
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ qed_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+ /* Taking tx_lock is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+ u8 tc;
+
+ for (tc = 0; tc < fp->edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ return true;
+ return false;
+}
+
+/* This function copies the Rx buffer from the CONS position to the PROD
+ * position, since we failed to allocate a new Rx buffer.
+ */
+static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+{
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *sw_rx_data_cons =
+ &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ struct sw_rx_data *sw_rx_data_prod =
+ &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(sw_rx_data_cons, mapping));
+
+ sw_rx_data_prod->data = sw_rx_data_cons->data;
+ memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+
+ rxq->sw_rx_cons++;
+ rxq->sw_rx_prod++;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+ u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = {0};
+
+ /* Update producers */
+ rx_prods.bd_prod = cpu_to_le16(bd_prod);
+ rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (u32 *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
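+/* Translate the RSS hash type reported in the CQE into the stack's
+ * pkt_hash_types: plain IPv4/IPv6 hashes map to L3, any other computed hash
+ * to L4; when RXHASH is off or no hash was computed, report none.
+ */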
+static u32 qede_get_rxhash(struct qede_dev *edev,
+ u8 bitfields,
+ __le32 rss_hash,
+ enum pkt_hash_types *rxhash_type)
+{
+ enum rss_hash_type htype;
+
+ htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+ if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+ *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ return le32_to_cpu(rss_hash);
+ }
+ *rxhash_type = PKT_HASH_TYPE_NONE;
+ return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+ skb_checksum_none_assert(skb);
+
+ if (csum_flag & QEDE_CSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ if (vlan_tag)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+
+ napi_gro_receive(&fp->napi, skb);
+}
+
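+/* Decode the CQE parsing flags: if HW calculated the L4 checksum and flagged
+ * no L4 error, the skb can be marked CHECKSUM_UNNECESSARY; an IP header error
+ * (or an L4 error on a calculated checksum) instead yields QEDE_CSUM_ERROR
+ * and the packet is dropped.
+ */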
+static u8 qede_check_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_dev *edev = fp->edev;
+ struct qede_rx_queue *rxq = fp->rxq;
+
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+ int rx_pkt = 0;
+ u8 csum_flag;
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from speculatively reading the
+ * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE is
+ * speculatively read before FW writes it, and FW then writes the CQE
+ * and SB before the CPU reads hw_comp_cons, the CPU would process a
+ * stale CQE.
+ */
+ rmb();
+
+ /* Loop to complete all indicated BDs */
+ while (sw_comp_cons != hw_comp_cons) {
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ enum pkt_hash_types rxhash_type;
+ enum eth_rx_cqe_type cqe_type;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ struct sk_buff *skb;
+ u16 len, pad;
+ u32 rx_hash;
+ u8 *data;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)
+ qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ edev->ops->eth_cqe_completion(
+ edev->cdev, fp->rss_id,
+ (struct eth_slow_path_rx_cqe *)cqe);
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ data = sw_rx_data->data;
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->pkt_len);
+ pad = fp_cqe->placement_offset;
+
+ /* For every Rx BD consumed, we allocate a new BD so the BD ring
+ * is always with a fixed size. If allocation fails, we take the
+ * consumed BD and return it to the ring in the PROD position.
+ * The packet that was received on that BD will be dropped (and
+ * not passed to the upper stack).
+ */
+ if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(sw_rx_data, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ /* If this is an error packet then drop it */
+ parse_flag =
+ le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
+ csum_flag = qede_check_csum(parse_flag);
+ if (csum_flag == QEDE_CSUM_ERROR) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ kfree(data);
+ goto next_rx;
+ }
+
+ skb = build_skb(data, 0);
+
+ if (unlikely(!skb)) {
+ DP_NOTICE(edev,
+ "Build_skb failed, dropping incoming packet\n");
+ kfree(data);
+ rxq->rx_alloc_errors++;
+ goto next_rx;
+ }
+
+ skb_reserve(skb, pad);
+
+ } else {
+ DP_NOTICE(edev,
+ "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
+ qede_reuse_rx_data(rxq);
+ rxq->rx_alloc_errors++;
+ goto next_cqe;
+ }
+
+ sw_rx_data->data = NULL;
+
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+
+ rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+ fp_cqe->rss_hash,
+ &rxhash_type);
+
+ skb_set_hash(skb, rx_hash, rxhash_type);
+
+ qede_set_skb_csum(skb, csum_flag);
+
+ skb_record_rx_queue(skb, fp->rss_id);
+
+ qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+
+next_rx:
+ rxq->sw_rx_cons++;
+ rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ /* CR TPA - revisit how to handle budget in TPA perhaps
+ * increase on "end"
+ */
+ if (rx_pkt == budget)
+ break;
+ } /* repeat while sw_comp_cons != hw_comp_cons... */
+
+ /* Update producers */
+ qede_update_rx_prod(edev, rxq);
+
+ return rx_pkt;
+}
+
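+/* NAPI handler: service completed Tx work on every TC first, then Rx up to
+ * the remaining budget; only when both rings are verifiably idle (re-checked
+ * after updating the SB index) is the interrupt re-armed via qed_sb_ack().
+ */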
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+ struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+ napi);
+ struct qede_dev *edev = fp->edev;
+
+ while (1) {
+ u8 tc;
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ if (qede_has_rx_work(fp->rxq)) {
+ work_done += qede_rx_int(fp, budget - work_done);
+
+ /* must not complete if we consumed full budget */
+ if (work_done >= budget)
+ break;
+ }
+
+ /* Fall out from the NAPI loop if needed */
+ if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ break;
+ }
+ }
+ }
+
+ return work_done;
+}
+
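+/* MSI-X fast-path ISR: mask further status-block interrupts (IGU_INT_DISABLE)
+ * and defer all work to NAPI; qede_poll() re-enables the interrupt once both
+ * rings are drained.
+ */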
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct qede_fastpath *fp = fp_cookie;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ napi_schedule_irqoff(&fp->napi);
+ return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char mac[ETH_ALEN])
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.mac_valid = 1;
+ ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+ struct qed_eth_stats stats;
+
+ edev->ops->get_vport_stats(edev->cdev, &stats);
+ edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+ edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+ edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+ edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+ edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+ edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+ edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+ edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+ edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+ edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+ edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+ edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+ edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+ edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+ edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+ edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+ edev->stats.coalesced_events = stats.tpa_coalesced_events;
+ edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+ edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+ edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+ edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+ edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
+ edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
+ edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
+ edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
+ edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
+ edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
+ edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
+ edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
+ edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
+ edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_crc_errors = stats.rx_crc_errors;
+ edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+ edev->stats.rx_pause_frames = stats.rx_pause_frames;
+ edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+ edev->stats.rx_align_errors = stats.rx_align_errors;
+ edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+ edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+ edev->stats.rx_jabbers = stats.rx_jabbers;
+ edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+ edev->stats.rx_fragments = stats.rx_fragments;
+ edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+ edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+ edev->stats.tx_128_to_255_byte_packets =
+ stats.tx_128_to_255_byte_packets;
+ edev->stats.tx_256_to_511_byte_packets =
+ stats.tx_256_to_511_byte_packets;
+ edev->stats.tx_512_to_1023_byte_packets =
+ stats.tx_512_to_1023_byte_packets;
+ edev->stats.tx_1024_to_1518_byte_packets =
+ stats.tx_1024_to_1518_byte_packets;
+ edev->stats.tx_1519_to_2047_byte_packets =
+ stats.tx_1519_to_2047_byte_packets;
+ edev->stats.tx_2048_to_4095_byte_packets =
+ stats.tx_2048_to_4095_byte_packets;
+ edev->stats.tx_4096_to_9216_byte_packets =
+ stats.tx_4096_to_9216_byte_packets;
+ edev->stats.tx_9217_to_16383_byte_packets =
+ stats.tx_9217_to_16383_byte_packets;
+ edev->stats.tx_pause_frames = stats.tx_pause_frames;
+ edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+ edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+ edev->stats.tx_total_collisions = stats.tx_total_collisions;
+ edev->stats.brb_truncates = stats.brb_truncates;
+ edev->stats.brb_discards = stats.brb_discards;
+ edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(
+ struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ qede_fill_by_demand_stats(edev);
+
+ stats->rx_packets = edev->stats.rx_ucast_pkts +
+ edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+ stats->tx_packets = edev->stats.tx_ucast_pkts +
+ edev->stats.tx_mcast_pkts +
+ edev->stats.tx_bcast_pkts;
+
+ stats->rx_bytes = edev->stats.rx_ucast_bytes +
+ edev->stats.rx_mcast_bytes +
+ edev->stats.rx_bcast_bytes;
+
+ stats->tx_bytes = edev->stats.tx_ucast_bytes +
+ edev->stats.tx_mcast_bytes +
+ edev->stats.tx_bcast_bytes;
+
+ stats->tx_errors = edev->stats.tx_err_drop_pkts;
+ stats->multicast = edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+
+ stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+ stats->collisions = edev->stats.tx_total_collisions;
+ stats->rx_crc_errors = edev->stats.rx_crc_errors;
+ stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+ return stats;
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_get_stats64 = qede_get_stats64,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+ struct pci_dev *pdev,
+ struct qed_dev_eth_info *info,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct net_device *ndev;
+ struct qede_dev *edev;
+
+ ndev = alloc_etherdev_mqs(sizeof(*edev),
+ info->num_queues,
+ info->num_queues);
+ if (!ndev) {
+ pr_err("etherdev allocation failed\n");
+ return NULL;
+ }
+
+ edev = netdev_priv(ndev);
+ edev->ndev = ndev;
+ edev->cdev = cdev;
+ edev->pdev = pdev;
+ edev->dp_module = dp_module;
+ edev->dp_level = dp_level;
+ edev->ops = qed_ops;
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, info, sizeof(*info));
+
+ edev->num_tc = edev->dev_info.num_tc;
+
+ return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+ struct net_device *ndev = edev->ndev;
+ struct pci_dev *pdev = edev->pdev;
+ u32 hw_features;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ndev->mem_start = edev->dev_info.common.pci_mem_start;
+ ndev->base_addr = ndev->mem_start;
+ ndev->mem_end = edev->dev_info.common.pci_mem_end;
+ ndev->irq = edev->dev_info.common.pci_irq;
+
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ ndev->netdev_ops = &qede_netdev_ops;
+
+ qede_set_ethtool_ops(ndev);
+
+ /* user-changeable features */
+ hw_features = NETIF_F_GRO | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+ ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ ndev->hw_features = hw_features;
+
+ /* Set network device HW mac */
+ ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts the 32b debug param into two params: level and module.
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking the specific flow at a low level.
+ *
+ * Notice that the level should be that of the lowest required logs.
+ */
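+/* For example (assuming the masks follow the bit layout described above, with
+ * QED_LOG_NOTICE_MASK at b31 and QED_LOG_INFO_MASK at b30): debug=0x40000000
+ * selects INFO level, while debug=0x00000003 selects VERBOSE level with the
+ * module bits 0-1 enabled.
+ */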
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+ *p_dp_level = QED_LEVEL_NOTICE;
+ *p_dp_module = 0;
+
+ if (debug & QED_LOG_VERBOSE_MASK) {
+ *p_dp_level = QED_LEVEL_VERBOSE;
+ *p_dp_module = (debug & 0x3FFFFFFF);
+ } else if (debug & QED_LOG_INFO_MASK) {
+ *p_dp_level = QED_LEVEL_INFO;
+ } else if (debug & QED_LOG_NOTICE_MASK) {
+ *p_dp_level = QED_LEVEL_NOTICE;
+ }
+}
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+ if (edev->fp_array) {
+ struct qede_fastpath *fp;
+ int i;
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ kfree(fp->sb_info);
+ kfree(fp->rxq);
+ kfree(fp->txqs);
+ }
+ kfree(edev->fp_array);
+ }
+ edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+ struct qede_fastpath *fp;
+ int i;
+
+ edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ sizeof(*edev->fp_array), GFP_KERNEL);
+ if (!edev->fp_array) {
+ DP_NOTICE(edev, "fp array allocation failed\n");
+ goto err;
+ }
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ if (!fp->sb_info) {
+ DP_NOTICE(edev, "sb info struct allocation failed\n");
+ goto err;
+ }
+
+ fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev, "RXQ struct allocation failed\n");
+ goto err;
+ }
+
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev, "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ qede_free_fp_array(edev);
+ return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ sp_task.work);
+ mutex_lock(&edev->qede_lock);
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ qede_config_rx_mode(edev->ndev);
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+ struct qed_pf_params pf_params;
+
+ /* 16 rx + 16 tx */
+ memset(&pf_params, 0, sizeof(struct qed_pf_params));
+ pf_params.eth_pf_params.num_cons = 32;
+ qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+ QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ enum qede_probe_mode mode)
+{
+ struct qed_slowpath_params params;
+ struct qed_dev_eth_info dev_info;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+ int rc;
+
+ if (unlikely(dp_level & QED_LEVEL_INFO))
+ pr_notice("Starting qede probe\n");
+
+ cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
+ dp_module, dp_level);
+ if (!cdev) {
+ rc = -ENODEV;
+ goto err0;
+ }
+
+ qede_update_pf_params(cdev);
+
+ /* Start the Slowpath-process */
+ memset(&params, 0, sizeof(struct qed_slowpath_params));
+ params.int_mode = QED_INT_MODE_MSIX;
+ params.drv_major = QEDE_MAJOR_VERSION;
+ params.drv_minor = QEDE_MINOR_VERSION;
+ params.drv_rev = QEDE_REVISION_VERSION;
+ params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &params);
+ if (rc) {
+ pr_notice("Cannot start slowpath\n");
+ goto err1;
+ }
+
+ /* Learn information crucial for qede to progress */
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto err2;
+
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ qede_init_ndev(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err3;
+ }
+
+ edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+ edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+
+ DP_INFO(edev, "Ending successfully qede probe\n");
+
+ return 0;
+
+err3:
+ free_netdev(edev->ndev);
+err2:
+ qed_ops->common->slowpath_stop(cdev);
+err1:
+ qed_ops->common->remove(cdev);
+err0:
+ return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(debug, &dp_module, &dp_level);
+
+ return __qede_probe(pdev, dp_module, dp_level,
+ QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+ QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_INFO(edev, "Starting qede_remove\n");
+
+ cancel_delayed_work_sync(&edev->sp_task);
+ unregister_netdev(ndev);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(ndev);
+
+ /* Use global ops since we've freed edev */
+ qed_ops->common->slowpath_stop(cdev);
+ qed_ops->common->remove(cdev);
+
+ pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+ int rc;
+ u16 rss_num;
+
+ /* Set up queues according to available resources */
+ rss_num = netif_get_num_default_rss_queues() *
+ edev->dev_info.common.num_hwfns;
+
+ rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+ rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+ if (rc > 0) {
+ /* Managed to request interrupts for our queues */
+ edev->num_rss = rc;
+ DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+ QEDE_RSS_CNT(edev), rss_num);
+ rc = 0;
+ }
+ return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info)
+{
+ if (sb_info->sb_virt)
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+ sizeof(*sb_virt),
+ &sb_phys, GFP_KERNEL);
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 i;
+
+ for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+ struct sw_rx_data *rx_buf;
+ u8 *data;
+
+ rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+ data = rx_buf->data;
+
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->data = NULL;
+ kfree(data);
+ }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ /* Free rx buffers */
+ qede_free_rx_buffers(edev, rxq);
+
+ /* Free the parallel SW ring */
+ kfree(rxq->sw_rx_ring);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
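+/* Allocate and map a single Rx buffer and publish it on the BD ring.
+ * GFP_ATOMIC is used because this also runs from the Rx fast path (see the
+ * per-CQE refill in qede_rx_int()), where sleeping is not allowed.
+ */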
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ u16 rx_buf_size;
+ u8 *data;
+
+ rx_buf_size = rxq->rx_buf_size;
+
+ data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ if (unlikely(!data)) {
+ DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(&edev->pdev->dev, data,
+ rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ kfree(data);
+ DP_NOTICE(edev, "Failed to map Rx buffer\n");
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->data = data;
+
+ dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+
+ return 0;
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ int i, rc, size, num_allocated;
+
+ rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
+ rxq->rx_buf_size = NET_IP_ALIGN +
+ ETH_OVERHEAD +
+ edev->ndev->mtu +
+ QEDE_FW_RX_ALIGN_END;
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!rxq->sw_rx_ring) {
+ DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ NUM_RX_BDS_MAX,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring);
+
+ if (rc)
+ goto err;
+
+ /* Allocate FW completion ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ NUM_RX_BDS_MAX,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring);
+ if (rc)
+ goto err;
+
+ /* Allocate buffers for the Rx ring */
+ for (i = 0; i < rxq->num_rx_buffers; i++) {
+ rc = qede_alloc_rx_buffer(edev, rxq);
+ if (rc)
+ break;
+ }
+ num_allocated = i;
+ if (!num_allocated) {
+ DP_ERR(edev, "Rx buffers allocation failed\n");
+ goto err;
+ } else if (num_allocated < rxq->num_rx_buffers) {
+ DP_NOTICE(edev,
+ "Allocated less buffers than desired (%d allocated)\n",
+ num_allocated);
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_rxq(edev, rxq);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ /* Free the parallel SW ring */
+ kfree(txq->sw_tx_ring);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ int size, rc;
+ union eth_tx_bd_types *p_virt;
+
+ txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+ /* Allocate the parallel driver ring for Tx buffers */
+ size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring) {
+ DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ NUM_TX_BDS_MAX,
+ sizeof(*p_virt),
+ &txq->tx_pbl);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ qede_free_mem_txq(edev, txq);
+ return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int tc;
+
+ qede_free_mem_sb(edev, fp->sb_info);
+
+ qede_free_mem_rxq(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * that contains a status block, one Rx queue and multiple per-TC Tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int rc, tc;
+
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+ if (rc)
+ goto err;
+
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ if (rc)
+ goto err;
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_fp(edev, fp);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ qede_free_mem_fp(edev, fp);
+ }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+ int rc = 0, rss_id;
+
+ for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+ rc = qede_alloc_mem_fp(edev, fp);
+ if (rc)
+ break;
+ }
+
+ if (rss_id != QEDE_RSS_CNT(edev)) {
+ /* Failed allocating memory for all the queues */
+ if (!rss_id) {
+ DP_ERR(edev,
+ "Failed to allocate memory for the leading queue\n");
+ edev->num_rss = 0;
+ return -ENOMEM;
+ }
+
+ /* Tolerate the partial allocation; continue with the queues
+ * that were successfully allocated.
+ */
+ DP_NOTICE(edev,
+ "Failed to allocate memory for all RSS queues; desired: %d, allocated: %d\n",
+ QEDE_RSS_CNT(edev), rss_id);
+ edev->num_rss = rss_id;
+ }
+
+ return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+ int rss_id, txq_index, tc;
+ struct qede_fastpath *fp;
+
+ for_each_rss(rss_id) {
+ fp = &edev->fp_array[rss_id];
+
+ fp->edev = edev;
+ fp->rss_id = rss_id;
+
+ memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+ memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rss_id;
+
+ memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+ fp->txqs[tc].index = txq_index;
+ }
+
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ edev->ndev->name, rss_id);
+ }
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+
+ netif_napi_del(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rss(i) {
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+ qede_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+ int i;
+
+ for (i = 0; i < edev->int_info.used_cnt; i++) {
+ if (edev->int_info.msix_cnt) {
+ synchronize_irq(edev->int_info.msix[i].vector);
+ free_irq(edev->int_info.msix[i].vector,
+ &edev->fp_array[i]);
+ } else {
+ edev->ops->common->simd_handler_clean(edev->cdev, i);
+ }
+ }
+
+ edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+ int i, rc;
+
+ /* Sanity check: there must be an MSI-X vector for every prepared RSS queue */
+ if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ DP_ERR(edev,
+ "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+ QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ rc = request_irq(edev->int_info.msix[i].vector,
+ qede_msix_fp_int, 0, edev->fp_array[i].name,
+ &edev->fp_array[i]);
+ if (rc) {
+ DP_ERR(edev, "Request fp %d irq failed\n", i);
+ qede_sync_free_irqs(edev);
+ return rc;
+ }
+ DP_VERBOSE(edev, NETIF_MSG_INTR,
+ "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+ edev->fp_array[i].name, i,
+ &edev->fp_array[i]);
+ edev->int_info.used_cnt++;
+ }
+
+ return 0;
+}
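+
+/* One MSI-X vector is requested per fastpath, and the fastpath structure
+ * itself is the request_irq() cookie, so the interrupt handler gets direct
+ * access to the fastpath (and its NAPI context) it serves.
+ */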
+
+static void qede_simd_fp_handler(void *cookie)
+{
+ struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+ napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+ int i, rc = 0;
+
+ /* Learn Interrupt configuration */
+ rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+ if (rc)
+ return rc;
+
+ if (edev->int_info.msix_cnt) {
+ rc = qede_req_msix_irqs(edev);
+ if (rc)
+ return rc;
+ edev->ndev->irq = edev->int_info.msix[0].vector;
+ } else {
+ const struct qed_common_ops *ops;
+
+ /* qed should learn the RSS ids and callbacks */
+ ops = edev->ops->common;
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ ops->simd_handler_config(edev->cdev,
+ &edev->fp_array[i], i,
+ qede_simd_fp_handler);
+ edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ }
+ return 0;
+}
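+
+/* Without MSI-X no vectors are requested here; instead qede_simd_fp_handler()
+ * is registered with the qed core per RSS id, and qed invokes it from its
+ * interrupt path, which is why the handler uses napi_schedule_irqoff().
+ */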
+
+static int qede_drain_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ bool allow_drain)
+{
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ if (!cnt) {
+ if (allow_drain) {
+ DP_NOTICE(edev,
+ "Tx queue[%d] is stuck, requesting MCP to drain\n",
+ txq->index);
+ rc = edev->ops->common->drain(edev->cdev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(edev, txq, false);
+ }
+ DP_NOTICE(edev,
+ "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+ txq->index, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -ENODEV;
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
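+
+/* The polling loop above allows roughly a second or two (1000 iterations of
+ * a 1-2ms sleep) for the producer and consumer to meet before declaring the
+ * queue stuck. An MCP drain is requested at most once: the recursive call
+ * passes allow_drain = false, so a second timeout returns -ENODEV.
+ */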
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qed_dev *cdev = edev->cdev;
+ int rc, tc, i;
+
+ /* Disable the vport */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 0;
+ vport_update_params.update_rss_flg = 0;
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
+ }
+
+ /* Stop all queues in reverse order */
+ for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ struct qed_stop_rxq_params rx_params;
+
+ /* Stop the Tx queue(s) */
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+
+ tx_params.rss_id = i;
+ tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
+ }
+
+ /* Stop the Rx queue */
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = i;
+
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+
+ /* Stop the vport */
+ rc = edev->ops->vport_stop(cdev, 0);
+ if (rc)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ return rc;
+}
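+
+/* Teardown order matters: the vport is deactivated first so firmware stops
+ * accepting new traffic, the Tx queues are drained so no BDs remain in
+ * flight, and only then are the queue-stop and vport-stop ramrods sent,
+ * walking the queues in reverse of their start order.
+ */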
+
+static int qede_start_queues(struct qede_dev *edev)
+{
+ int rc, tc, i;
+ int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ struct qed_dev *cdev = edev->cdev;
+ struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
+ struct qed_update_vport_params vport_update_params;
+ struct qed_queue_start_common_params q_params;
+
+ if (!edev->num_rss) {
+ DP_ERR(edev,
+ "Cannot update V-VPORT as active as there are no Rx queues\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->vport_start(cdev, vport_id,
+ edev->ndev->mtu,
+ drop_ttl0_flg,
+ vlan_removal_en);
+
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+ vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+ dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ phys_table,
+ fp->rxq->rx_comp_ring.page_cnt,
+ &fp->rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+ return rc;
+ }
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+ qede_update_rx_prod(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+ int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = txq_index;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = edev->ops->q_tx_start(cdev, &q_params,
+ txq->tx_pbl.pbl.p_phys_table,
+ txq->tx_pbl.page_cnt,
+ &txq->doorbell_addr);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n",
+ txq_index, rc);
+ return rc;
+ }
+
+ txq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport_id;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 1;
+
+ /* Fill struct with RSS params */
+ if (QEDE_RSS_CNT(edev) > 1) {
+ vport_update_params.update_rss_flg = 1;
+ for (i = 0; i < 128; i++)
+ rss_params->rss_ind_table[i] =
+ ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
+ netdev_rss_key_fill(rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ } else {
+ memset(rss_params, 0, sizeof(*rss_params));
+ }
+ memcpy(&vport_update_params.rss_params, rss_params,
+ sizeof(*rss_params));
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
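+
+/* The doorbell data template is programmed once per Tx queue at start time
+ * (destination XCM, an aggregated "set BD producer" command), so the
+ * transmit fast path only has to plug in the new producer value before
+ * ringing the doorbell. The RSS indirection table is filled round-robin via
+ * ethtool_rxfh_indir_default(); the 128 entries presumably match the
+ * device's indirection table size.
+ */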
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char *mac, int num_macs)
+{
+ struct qed_filter_params filter_cmd;
+ int i;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_MCAST;
+ filter_cmd.filter.mcast.type = opcode;
+ filter_cmd.filter.mcast.num = num_macs;
+
+ for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+ ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+ QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "Starting qede unload\n");
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_CLOSED;
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+
+ edev->ops->fastpath_stop(edev->cdev);
+
+ /* Release the interrupts */
+ qede_sync_free_irqs(edev);
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+
+ qede_napi_disable_remove(edev);
+
+ qede_free_mem_load(edev);
+ qede_free_fp_array(edev);
+
+out:
+ mutex_unlock(&edev->qede_lock);
+ DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+ QEDE_LOAD_NORMAL,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+ struct qed_link_params link_params;
+ struct qed_link_output link_output;
+ int rc;
+
+ DP_INFO(edev, "Starting qede load\n");
+
+ rc = qede_set_num_queues(edev);
+ if (rc)
+ goto err0;
+
+ rc = qede_alloc_fp_array(edev);
+ if (rc)
+ goto err0;
+
+ qede_init_fp(edev);
+
+ rc = qede_alloc_mem_load(edev);
+ if (rc)
+ goto err1;
+ DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+ QEDE_RSS_CNT(edev), edev->num_tc);
+
+ rc = qede_set_real_num_queues(edev);
+ if (rc)
+ goto err2;
+
+ qede_napi_add_enable(edev);
+ DP_INFO(edev, "Napi added and enabled\n");
+
+ rc = qede_setup_irqs(edev);
+ if (rc)
+ goto err3;
+ DP_INFO(edev, "Setup IRQs succeeded\n");
+
+ rc = qede_start_queues(edev);
+ if (rc)
+ goto err4;
+ DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+ /* Add primary mac and set Rx filters */
+ ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_OPEN;
+ mutex_unlock(&edev->qede_lock);
+
+ /* Ask for link-up using current configuration */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Query whether link is already up */
+ memset(&link_output, 0, sizeof(link_output));
+ edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_link_update(edev, &link_output);
+
+ DP_INFO(edev, "Ending successfully qede load\n");
+
+ return 0;
+
+err4:
+ qede_sync_free_irqs(edev);
+ memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
+err3:
+ qede_napi_disable_remove(edev);
+err2:
+ qede_free_mem_load(edev);
+err1:
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+ qede_free_fp_array(edev);
+ edev->num_rss = 0;
+err0:
+ return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *, union qede_reload_args *),
+ union qede_reload_args *args)
+{
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+ /* Call function handler to update parameters
+ * needed for function load.
+ */
+ if (func)
+ func(edev, args);
+
+ qede_load(edev, QEDE_LOAD_NORMAL);
+
+ mutex_lock(&edev->qede_lock);
+ qede_config_rx_mode(edev->ndev);
+ mutex_unlock(&edev->qede_lock);
+}
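+
+/* qede_reload() is a full unload/load cycle; the optional callback runs in
+ * between, while all queues are torn down, which makes it a safe point to
+ * change parameters such as queue or buffer counts. The Rx mode is
+ * reapplied at the end since unload drops the vport and its filters.
+ */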
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+
+ edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+ return qede_load(edev, QEDE_LOAD_NORMAL);
+}
+
+static int qede_close(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+ return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qede_dev *edev = dev;
+
+ if (!netif_running(edev->ndev)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ return;
+ }
+
+ if (link->link_up) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ } else {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ }
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int rc;
+
+ ASSERT_RTNL(); /* @@@TBD To be removed */
+
+ DP_INFO(edev, "Set_mac_addr called\n");
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ DP_NOTICE(edev, "The MAC address is not valid\n");
+ return -EFAULT;
+ }
+
+ ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+ if (!netif_running(ndev)) {
+ DP_NOTICE(edev, "The device is currently down\n");
+ return 0;
+ }
+
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ edev->primary_mac);
+ if (rc)
+ return rc;
+
+ /* Add MAC filter according to the new unicast HW MAC address */
+ ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+ return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+ enum qed_filter_rx_mode_type *accept_flags)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *mc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc = 0, mc_count;
+ size_t size;
+
+ size = 64 * ETH_ALEN;
+
+ mc_macs = kzalloc(size, GFP_KERNEL);
+ if (!mc_macs) {
+ DP_NOTICE(edev,
+ "Failed to allocate memory for multicast MACs\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ temp = mc_macs;
+
+ /* Remove all previously configured multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ mc_macs, 1);
+ if (rc)
+ goto exit;
+
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+ if (mc_count <= 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Check for all multicast @@@TBD resource allocation */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (mc_count > 64)) {
+ if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+ *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+ } else {
+ /* Add all multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ mc_macs, mc_count);
+ }
+
+exit:
+ kfree(mc_macs);
+ return rc;
+}
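+
+/* The multicast filter budget is 64 entries, hence the fixed 64 * ETH_ALEN
+ * scratch buffer; more than that, or IFF_ALLMULTI, degrades to
+ * multicast-promiscuous mode. The DEL command apparently clears the old
+ * multicast configuration wholesale, so a single zeroed MAC suffices.
+ */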
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_INFO(edev, "qede_set_rx_mode called\n");
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_INFO(edev,
+ "qede_set_rx_mode called while interface is down\n");
+ } else {
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ }
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+ enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_filter_params rx_mode;
+ unsigned char *uc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc, uc_count;
+ size_t size;
+
+ netif_addr_lock_bh(ndev);
+
+ uc_count = netdev_uc_count(ndev);
+ size = uc_count * ETH_ALEN;
+
+ uc_macs = kzalloc(size, GFP_ATOMIC);
+ if (!uc_macs) {
+ DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+ netif_addr_unlock_bh(ndev);
+ return;
+ }
+
+ temp = uc_macs;
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Configure the struct for the Rx mode */
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+ /* Remove all previous unicast secondary macs and multicast macs
+ * (configure / leave the primary mac)
+ */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+ edev->primary_mac);
+ if (rc)
+ goto out;
+
+ /* Check for promiscuous */
+ if ((ndev->flags & IFF_PROMISC) ||
+ (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ } else {
+ /* Add MAC filters according to the unicast secondary macs */
+ int i;
+
+ temp = uc_macs;
+ for (i = 0; i < uc_count; i++) {
+ rc = qede_set_ucast_rx_mac(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ temp);
+ if (rc)
+ goto out;
+
+ temp += ETH_ALEN;
+ }
+
+ rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+ if (rc)
+ goto out;
+ }
+
+ rx_mode.filter.accept_flags = accept_flags;
+ edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+ kfree(uc_macs);
+}
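+
+/* Unicast filtering mirrors the multicast logic: the primary MAC is kept
+ * via the REPLACE opcode, up to 15 secondary MACs are added (the @@@TBD
+ * limit checked above), and overflow or IFF_PROMISC falls back to fully
+ * promiscuous mode.
+ */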
diff --git a/kernel/drivers/net/ethernet/qlogic/qla3xxx.c b/kernel/drivers/net/ethernet/qlogic/qla3xxx.c
index 484771321..b09a6b80d 100644
--- a/kernel/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/kernel/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,8 +1736,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static u32 ql_get_msglevel(struct net_device *ndev)
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f221126a5..46bbea8e0 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -24,9 +24,7 @@
#include <linux/mii.h>
#include <linux/timer.h>
#include <linux/irq.h>
-
#include <linux/vmalloc.h>
-
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
@@ -39,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID "5.3.63"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -538,6 +536,7 @@ struct qlcnic_hardware_context {
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
+ u8 vxlan_port_count;
u16 vxlan_port;
struct device *hwmon_dev;
u32 post_mode;
@@ -926,6 +925,7 @@ struct qlcnic_mac_vlan_list {
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13
#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
@@ -1092,7 +1092,7 @@ struct qlcnic_filter_hash {
struct qlcnic_mailbox {
struct workqueue_struct *work_q;
struct qlcnic_adapter *adapter;
- struct qlcnic_mbx_ops *ops;
+ const struct qlcnic_mbx_ops *ops;
struct work_struct work;
struct completion completion;
struct list_head cmd_q;
@@ -1326,9 +1326,6 @@ struct qlcnic_eswitch {
};
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM -1
-
#define MAX_BW 100 /* % of link speed */
#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
@@ -2294,8 +2291,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30
#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
@@ -2322,7 +2320,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
(device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
(device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2338,7 +2337,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2354,7 +2354,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
}
static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 840bf36b5..37a731be7 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -5,14 +5,15 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+ {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -916,8 +918,6 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
mbx->cmd_op = type;
@@ -3513,6 +3513,31 @@ out:
qlcnic_free_mbx_args(&cmd);
}
+#define QLCNIC_83XX_ADD_PORT0 BIT_0
+#define QLCNIC_83XX_ADD_PORT1 BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+ cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+ cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "failed to issue extend iSCSI minidump capability\n");
+
+ return err;
+}
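+
+/* Mailbox command 0x37 asks the firmware to extend the minidump with iSCSI
+ * state on both ports; the two size arguments presumably reserve
+ * QLCNIC_83XX_EXTENDED_MEM_SIZE MB for each port.
+ */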
+
int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
{
u32 major, minor, sub;
@@ -4023,7 +4048,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
work);
struct qlcnic_adapter *adapter = mbx->adapter;
- struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
@@ -4073,7 +4098,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
}
}
-static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
.enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
.dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
.decode_resp = qlcnic_83xx_decode_mbx_rsp,
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 69f828eb4..331ae2c20 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/etherdevice.h>
+
#include "qlcnic_hw.h"
#define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b..bf892160d 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
size_t size;
u64 addr;
- temp = kzalloc(fw->size, GFP_KERNEL);
+ temp = vzalloc(fw->size);
if (!temp) {
release_firmware(fw);
fw_info->fw = NULL;
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (fw->size & 0xF) {
addr = dest + size;
for (i = 0; i < (fw->size & 0xF); i++)
- data[i] = temp[size + i];
+ data[i] = ((u8 *)temp)[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
exit:
release_firmware(fw);
fw_info->fw = NULL;
- kfree(temp);
+ vfree(temp);
return ret;
}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a62c..34906750b 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -246,12 +246,13 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
u32 state;
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
- while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+ while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
+ idc->vnic_wait_limit--;
msleep(1000);
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
}
- if (!idc->vnic_wait_limit) {
+ if (state != QLCNIC_DEV_NPAR_OPER) {
dev_err(&adapter->pdev->dev,
"vNIC mode not operational, state check timed out.\n");
return -EIO;
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 6e6f18fc5..daf05155b 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -73,8 +73,6 @@ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = type;
break;
}
@@ -774,8 +772,10 @@ int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
int i, err = 0;
for (i = 0; i < ahw->num_msix; i++) {
- qlcnic_alloc_mbx_args(&cmd, adapter,
- QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ if (err)
+ return err;
type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
val = type | (ahw->intr_tbl[i].type << 4);
if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 75ee9e4ce..509b596cf 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bitops.h>
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
#define MASK(n) ((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index cbe2399c3..4bb33af8e 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -109,6 +109,7 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37
#define QLCNIC_INTRPT_INTX 1
#define QLCNIC_INTRPT_MSIX 3
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 367f3976d..1205f6f9c 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -7,11 +7,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
@@ -25,6 +20,10 @@
#include <net/vxlan.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static const struct pci_device_id qlcnic_pci_tbl[] = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
- ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
@@ -353,7 +353,8 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data) &&
+ ether_addr_equal_unaligned(netdev->dev_addr, addr->sa_data))
return 0;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -483,11 +484,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
/* Adapter supports only one VXLAN port. Use very first port
* for enabling offload
*/
- if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ if (!qlcnic_encap_rx_offload(adapter))
+ return;
+ if (!ahw->vxlan_port_count) {
+ ahw->vxlan_port_count = 1;
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
return;
+ }
+ if (ahw->vxlan_port == ntohs(port))
+ ahw->vxlan_port_count++;
- ahw->vxlan_port = ntohs(port);
- adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
}
static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +503,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
(ahw->vxlan_port != ntohs(port)))
return;
- adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+ ahw->vxlan_port_count--;
+ if (!ahw->vxlan_port_count)
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
@@ -1031,7 +1040,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
pfn = pci_info[i].id;
if (pfn >= ahw->max_vnic_func) {
- ret = QL_STATUS_INVALID_PARAM;
+ ret = -EINVAL;
dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
__func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
@@ -1149,6 +1158,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -2403,7 +2413,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
qlcnic_free_tx_rings(adapter);
return -ENOMEM;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
spin_lock_init(&tx_ring->tx_clean_lock);
}
@@ -2492,6 +2501,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a3f..cda9e604a 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <net/ip.h>
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
#include "qlcnic_83xx_hw.h"
#include "qlcnic_hw.h"
-#include <net/ip.h>
-
#define QLC_83XX_MINIDUMP_FLASH 0x520000
#define QLC_83XX_OCM_INDEX 3
#define QLC_83XX_PCI_INDEX 0
@@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
netdev_info(adapter->netdev,
- "Dump data %d bytes captured, template header size %d bytes\n",
- fw_dump->size, fw_dump->tmpl_hdr_size);
+ "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+ fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+ fw_dump->tmpl_hdr);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
return 0;
}
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+ /* For special adapters (with the 0x8830 device ID), where the iSCSI
+ * firmware dump needs to be captured as part of the regular firmware
+ * dump collection process, the firmware exports its capability through
+ * capability registers
+ */
+ return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+ (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
u32 prev_version, current_version;
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
struct pci_dev *pdev = adapter->pdev;
+ bool extended = false;
prev_version = adapter->fw_version;
current_version = qlcnic_83xx_get_fw_version(adapter);
if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
vfree(fw_dump->tmpl_hdr);
+
+ if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+ extended = !qlcnic_83xx_extend_md_capab(adapter);
+
if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+ /* Once we have minidump template with extended iSCSI dump
+ * capability, update the minidump capture mask to 0x1f as
+ * per FW requirement
+ */
+ if (extended) {
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ hdr->drv_cap_mask = 0x1f;
+ fw_dump->cap_mask = 0x1f;
+ dev_info(&pdev->dev,
+ "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+ }
}
}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 4677b2edc..017d8c2c8 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -8,10 +8,11 @@
#ifndef _QLCNIC_83XX_SRIOV_H_
#define _QLCNIC_83XX_SRIOV_H_
-#include "qlcnic.h"
#include <linux/types.h>
#include <linux/pci.h>
+#include "qlcnic.h"
+
extern const u32 qlcnic_83xx_reg_tbl[];
extern const u32 qlcnic_83xx_ext_reg_tbl[];
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index e6312465f..7327b729b 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -5,10 +5,11 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
#define QLC_BC_COMMAND 0
#define QLC_BC_RESPONSE 1
@@ -728,8 +729,6 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index a29538b86..afd687e5e 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -5,9 +5,10 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
-#include <linux/types.h>
#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 59a721fba..ccbb04503 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -7,10 +7,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -24,7 +20,8 @@
#include <linux/hwmon-sysfs.h>
#endif
-#define QLC_STATUS_UNSUPPORTED_CMD -2
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
@@ -166,7 +163,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
u8 b_state, b_rate;
if (len != sizeof(u16))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memcpy(&beacon, buf, sizeof(u16));
err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +380,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
if (src_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
if (dest_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
s_esw_id = adapter->npars[src_index].phy_port;
d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
@@ -414,7 +411,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +424,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
action = !!pm_cfg[i].action;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +437,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -499,11 +496,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +514,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].mac_override != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].promisc_mode != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
break;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
default:
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
}
@@ -559,7 +556,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +567,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -604,7 +601,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +651,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
@@ -669,11 +666,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -694,7 +691,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +714,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -784,13 +781,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +816,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +850,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +880,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +950,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
- return QL_STATUS_INVALID_PARAM;
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
count = size / sizeof(u32);
@@ -1132,9 +1127,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
-
ret = kstrtoul(buf, 16, &data);
switch (data) {
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index c3c514e33..5dade1fd0 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -415,13 +415,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
(qdev->fw_rev_id & 0x000000ff));
strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
- else
- drvinfo->regdump_len = sizeof(struct ql_reg_dump);
- drvinfo->eedump_len = 0;
}
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
diff --git a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 25800a1de..997976426 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3871,9 +3871,6 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
return status;
}
- end_jiffies = jiffies +
- max((unsigned long)1, usecs_to_jiffies(30));
-
/* Check if bit is set then skip the mailbox command and
* clear the bit, else we are in normal reset process.
*/
@@ -3888,6 +3885,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+ end_jiffies = jiffies + usecs_to_jiffies(30);
do {
value = ql_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
@@ -4213,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
/* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- int i = 3;
- while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ int i = 4;
+
+ while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
netif_err(qdev, ifup, qdev->ndev,
"Waiting for adapter UP...\n");
ssleep(1);